Migrate shape inference to new IStaticShapeInfer API (#18579)

* Add static shape adapter
- Adapter holds CPU dimensions, either as a reference to them or as an owned vector
- Add ov::optional for holding optional result from shape inference
- Add new `infer` function in `IStaticShapeInfer` (a sketch of the old vs. new calling convention follows the commit metadata below)

* Temporary support of StaticShape

* Minor corrections in ShapeInferenceTA

* Migrate shape_infer to new interface version

* Replace StaticShape with adapter implementation

* Replace IShapeInferCommon with IStaticShapeInfer

* Correct code formatting

* Fix build issues

* NodeValidationFailure::create for StaticShapeRef
Pawel Raasz 2023-07-24 15:59:18 +02:00 committed by GitHub
parent ec8fe6353a
commit 5eab1be682
209 changed files with 2014 additions and 2968 deletions
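
The pattern repeated across the hunks below is mechanical: each `shape_infer` overload that filled a caller-provided `output_shapes` vector and read constant inputs from a `std::map<size_t, HostTensorPtr>` is replaced by an overload that returns its result and reads constant inputs through an `ITensorAccessor`. A minimal sketch of the two conventions for a hypothetical operator `MyOp` (the operator name is illustrative; `result_shape_t`, `ITensorAccessor`, and `make_tensor_accessor` are the real utilities this commit migrates to):

// Old convention, removed by this commit: result via out-parameter,
// constant inputs via a map of HostTensorPtr keyed by input port.
template <class TShape>
void shape_infer(const MyOp* op,
                 const std::vector<TShape>& input_shapes,
                 std::vector<TShape>& output_shapes,
                 const std::map<size_t, HostTensorPtr>& constant_data = {});

// New convention: result returned by value, the result shape type derived
// from the input shape type, constants read through a tensor accessor.
template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TRShape> shape_infer(const MyOp* op,
                                 const std::vector<TShape>& input_shapes,
                                 const ITensorAccessor& tensor_accessor = make_tensor_accessor());

The defaulted `TRShape` parameter is what lets one template serve both dynamic shapes (`PartialShape` in and out) and, per the commit message, the CPU static-shape adapters.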


@@ -66,8 +66,7 @@ void ov::op::internal::AUGRUCell::validate_and_infer_types() {
     OPENVINO_SUPPRESS_DEPRECATED_START
     const auto input_shapes = get_node_input_partial_shapes(*this);
     OPENVINO_SUPPRESS_DEPRECATED_END
-    std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic(2)};
-    shape_infer(this, input_shapes, output_shapes);
+    std::vector<ov::PartialShape> output_shapes = shape_infer(this, input_shapes);
     // Set output type and shape
     set_output_type(0, result_et, output_shapes[0]);


@@ -64,8 +64,7 @@ void ov::op::internal::AUGRUSequence::validate_and_infer_types() {
    OPENVINO_SUPPRESS_DEPRECATED_START
    const auto input_shapes = get_node_input_partial_shapes(*this);
    OPENVINO_SUPPRESS_DEPRECATED_END
-    std::vector<ov::PartialShape> output_shapes = {ov::PartialShape::dynamic(4), ov::PartialShape::dynamic(3)};
-    shape_infer(this, input_shapes, output_shapes);
+    std::vector<ov::PartialShape> output_shapes = shape_infer(this, input_shapes);
    // Set output size, type and shape
    set_output_size(2);


@@ -34,8 +34,6 @@ public:
 private:
     std::string m_variable_id;
-    template <class T>
-    friend void shape_infer(const Assign* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes);
 };
 } // namespace v3
@@ -72,10 +70,6 @@ public:
     OPENVINO_SUPPRESS_DEPRECATED_END
     bool has_evaluate() const override;
     bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
-
-private:
-    template <class T>
-    friend void shape_infer(const Assign* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes);
 };
 } // namespace v6
 } // namespace op


@@ -58,11 +58,6 @@ public:
 private:
     Attributes m_attrs;
-
-    template <class T>
-    friend void shape_infer(const ExperimentalDetectronROIFeatureExtractor* op,
-                            const std::vector<T>& input_shapes,
-                            std::vector<T>& output_shapes);
 };
 } // namespace v6
 } // namespace op


@@ -67,10 +67,6 @@ private:
     Strides m_patch_movement_strides;
     Shape m_patch_selection_rates;
     PadType m_padding{PadType::EXPLICIT};
-    template <class T>
-    friend void shape_infer(const ExtractImagePatches* op,
-                            const std::vector<T>& input_shapes,
-                            std::vector<T>& output_shapes);
 };
 } // namespace v3
 } // namespace op


@@ -240,11 +240,6 @@ private:
     /// \brief The order of gates in weights tensors.
     ///
     LSTMWeightsFormat m_weights_format;
-
-    static constexpr std::size_t s_gates_count{4};
-    static constexpr std::size_t s_peepholes_count{3};
-    template <class T>
-    friend void shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes);
 };
 } // namespace v0
@@ -380,10 +375,6 @@ private:
     /// \brief The Activation function h.
     ///
     util::ActivationFunction m_activation_h;
-
-    static constexpr std::size_t s_gates_count{4};
-    template <class T>
-    friend void shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes);
 };
 } // namespace v4
 } // namespace op


@@ -105,9 +105,6 @@ private:
     std::vector<float> m_anchors{};
     int m_axis;
     int m_end_axis;
-
-    template <class T>
-    friend void shape_infer(const RegionYolo* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes);
 };
 } // namespace v0
 } // namespace op


@@ -12,19 +12,11 @@ namespace ov {
 namespace op {
 namespace v8 {
 
-template <class TShape>
-std::vector<TShape> shape_infer(const AdaptiveAvgPool* op,
-                                const std::vector<TShape>& input_shapes,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    return {pooling::out_shape_infer(op, input_shapes, constant_data)};
-}
-
-template <class TShape>
-void shape_infer(const AdaptiveAvgPool* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = shape_infer(op, input_shapes, constant_data);
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const AdaptiveAvgPool* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 const ITensorAccessor& tensor_acessor = make_tensor_accessor()) {
+    return {pooling::out_shape_infer(op, input_shapes, tensor_acessor)};
 }
 } // namespace v8
 } // namespace op
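
A hedged sketch of a call site under the new convention, assuming the container-accepting `make_tensor_accessor` overload from the shape-inference utilities (the map type and the values here are illustrative):

// Hypothetical caller: infer AdaptiveAvgPool's output shapes when input 1
// (the requested spatial size) is known as a constant tensor.
// op is assumed to be a const ov::op::v8::AdaptiveAvgPool*.
int64_t spatial[] = {7, 7};
std::unordered_map<size_t, ov::Tensor> const_data;  // input port -> tensor
const_data.emplace(1, ov::Tensor(ov::element::i64, ov::Shape{2}, spatial));

const auto output_shapes = shape_infer(op, input_shapes, ov::make_tensor_accessor(const_data));

When no constant data is supplied, the defaulted `make_tensor_accessor()` argument yields an accessor that reports nothing, and the affected output dimensions stay dynamic.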


@@ -12,19 +12,11 @@ namespace ov {
 namespace op {
 namespace v8 {
 
-template <class TShape>
-std::vector<TShape> shape_infer(const AdaptiveMaxPool* op,
-                                const std::vector<TShape>& input_shapes,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    return {2, pooling::out_shape_infer(op, input_shapes, constant_data)};
-}
-
-template <class TShape>
-void shape_infer(const AdaptiveMaxPool* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = shape_infer(op, input_shapes, constant_data);
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const AdaptiveMaxPool* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
+    return {2, pooling::out_shape_infer(op, input_shapes, tensor_accessor)};
 }
 } // namespace v8
 } // namespace op


@@ -5,18 +5,20 @@
 #include <openvino/core/graph_util.hpp>
 #include <openvino/op/assign.hpp>
 
+#include "copy_shape_inference.hpp"
 #include "utils.hpp"
 
 namespace ov {
 namespace op {
 namespace v3 {
-template <class T>
-void shape_infer(const Assign* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
-    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1);
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const Assign* op, const std::vector<TShape>& input_shapes) {
+    auto output_shapes = ov::op::copy_shape_infer(op, input_shapes);
     const auto& input_shape = input_shapes[0];
-    const auto& variable_info = op->m_variable->get_info();
+    const auto& variable_info = op->get_variable()->get_info();
     NODE_VALIDATION_CHECK(op,
-                          op->m_variable_id == variable_info.variable_id,
+                          op->get_variable_id() == variable_info.variable_id,
                           "Variables identifiers are inconsistent.");
     const auto& arg_t = op->get_input_element_type(0);
     NODE_VALIDATION_CHECK(op, arg_t == variable_info.data_type, "Variables types are inconsistent.");
@@ -26,16 +28,8 @@ void shape_infer(const Assign* op, const std::vector<T>& input_shapes, std::vect
                           input_shape.to_shape() == variable_info.data_shape.to_shape(),
                           "Variables output shapes are inconsistent.");
     }
-    copy_shape_infer(op, input_shapes, output_shapes);
+    return output_shapes;
 }
 } // namespace v3
-
-namespace v6 {
-template <class T>
-void shape_infer(const Assign* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
-    copy_shape_infer(op, input_shapes, output_shapes);
-}
-} // namespace v6
 } // namespace op
 } // namespace ov


@@ -12,10 +12,8 @@ namespace ov {
 namespace op {
 namespace internal {
 
-template <class ShapeType>
-void shape_infer(const ov::op::internal::AUGRUCell* op,
-                 const std::vector<ShapeType>& input_shapes,
-                 std::vector<ShapeType>& output_shapes) {
+template <class ShapeType, class TRShape = result_shape_t<ShapeType>>
+std::vector<TRShape> shape_infer(const ov::op::internal::AUGRUCell* op, const std::vector<ShapeType>& input_shapes) {
     constexpr size_t expected_in_shapes_count = 6;
     NODE_VALIDATION_CHECK(op,
                           input_shapes.size() == expected_in_shapes_count,
@@ -27,7 +25,7 @@ void shape_infer(const ov::op::internal::AUGRUCell* op,
     constexpr auto num_gates = 3;
     constexpr auto num_state_nodes = 1;
 
-    output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes);
+    auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes);
 
     // `A` input shape validation // [batch_size, 1]
     const auto& a_shape = input_shapes.back();
@@ -41,6 +39,7 @@ void shape_infer(const ov::op::internal::AUGRUCell* op,
         }
         NODE_VALIDATION_CHECK(op, a_shape[1].compatible(1), "The last dimension of `A` shape must be equal to `1`.");
     }
+    return output_shapes;
 }
 } // namespace internal
 } // namespace op


@@ -11,10 +11,9 @@ namespace ov {
 namespace op {
 namespace internal {
 
-template <class ShapeType>
-void shape_infer(const ov::op::internal::AUGRUSequence* op,
-                 const std::vector<ShapeType>& input_shapes,
-                 std::vector<ShapeType>& output_shapes) {
+template <class ShapeType, class TRShape = result_shape_t<ShapeType>>
+std::vector<TRShape> shape_infer(const ov::op::internal::AUGRUSequence* op,
+                                 const std::vector<ShapeType>& input_shapes) {
     constexpr size_t expected_in_shapes_count = 7;
     NODE_VALIDATION_CHECK(op,
                           input_shapes.size() == expected_in_shapes_count,
@@ -26,12 +25,12 @@ void shape_infer(const ov::op::internal::AUGRUSequence* op,
     constexpr auto num_gates = 3;
     constexpr auto num_state_nodes = 1;
 
-    output_shapes = rnn::seq_base_shape_infer(op,
-                                              input_shapes,
-                                              num_gates,
-                                              num_state_nodes,
-                                              op->get_direction(),
-                                              op->get_linear_before_reset());
+    auto output_shapes = rnn::seq_base_shape_infer(op,
+                                                   input_shapes,
+                                                   num_gates,
+                                                   num_state_nodes,
+                                                   op->get_direction(),
+                                                   op->get_linear_before_reset());
 
     // A input shape validation // [batch_size, seq_length, 1]
     const auto& a_shape = input_shapes.back();
@@ -48,6 +47,7 @@ void shape_infer(const ov::op::internal::AUGRUSequence* op,
         }
         NODE_VALIDATION_CHECK(op, a_shape[2].compatible(1), "The last dimension of `A` shape must be equal to `1`.");
     }
+    return output_shapes;
 }
 } // namespace internal
 } // namespace op


@@ -36,12 +36,11 @@ inline void valid_dilated_kernel_with_padding(const v1::AvgPool* op,
 } // namespace pooling
 
 namespace v1 {
-template <class TShape, class TContainer>
-std::vector<TShape> shape_infer(const AvgPool* op,
-                                const std::vector<TShape>& input_shapes,
-                                TContainer& pads_begin,
-                                TContainer& pads_end,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const AvgPool* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 TContainer& pads_begin,
+                                 TContainer& pads_end) {
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
 
     const auto& data_shape = input_shapes[0];
     const auto dilations = Strides(op->get_kernel().size(), 1);


@@ -16,10 +16,10 @@ namespace ov {
 namespace op {
 namespace v1 {
-template <class TShape>
-std::vector<TShape> shape_infer(const BatchToSpace* op,
-                                const std::vector<TShape>& input_shapes,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const BatchToSpace* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
     using namespace ov::util;
     using ValType = typename TShape::value_type::value_type;
 
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
@@ -29,10 +29,10 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
     const auto& crops_begin_shape = input_shapes[2];
     const auto& crops_end_shape = input_shapes[3];
 
-    auto inputs_same_ps = crops_begin_shape;
+    TRShape inputs_same_ps = crops_begin_shape;
     NODE_VALIDATION_CHECK(
         op,
-        TShape::merge_into(inputs_same_ps, crops_end_shape) && TShape::merge_into(inputs_same_ps, block_shape),
+        TRShape::merge_into(inputs_same_ps, crops_end_shape) && TRShape::merge_into(inputs_same_ps, block_shape),
         "block_shape, crops_begin and crops_end inputs must have the same shape. Got: ",
        block_shape,
        ", ",
@@ -45,6 +45,9 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
         "block_shape and crops inputs must have rank 1. Got: ",
         inputs_same_ps.rank());
 
+    auto output_shapes = std::vector<TRShape>(1);
+    auto& out_shape = output_shapes[0];
+
     const auto data_rank = data_shape.rank();
     if (data_rank.is_static()) {
         constexpr size_t spatial_dim_offset = 1;
@@ -65,10 +68,9 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
                               data_rank);
         }
 
-        TShape out_shape;
         out_shape.reserve(data_rank_size);
 
-        const auto blocks = get_input_const_data_as<TShape, int64_t>(op, 1, constant_data);
+        const auto blocks = get_input_const_data_as<TRShape, int64_t>(op, 1, tensor_accessor);
         if (blocks) {
             NODE_VALIDATION_CHECK(op,
                                   std::none_of(begin(*blocks), end(*blocks), cmp::Less<int64_t>(1)),
@@ -81,9 +83,12 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
             out_shape.emplace_back(dim::inf_bound);
         }
 
-        std::vector<int64_t> crops_begin_val, crops_end_val;
-        if (get_data_as_int64<TShape>(2, op, crops_begin_val, constant_data) &&
-            get_data_as_int64<TShape>(3, op, crops_end_val, constant_data)) {
+        const auto crops_begin = get_input_const_data_as<TRShape, int64_t>(op, 2, tensor_accessor);
+        const auto crops_end = get_input_const_data_as<TRShape, int64_t>(op, 3, tensor_accessor);
+        if (crops_begin && crops_end) {
+            auto& crops_begin_val = *crops_begin;
+            auto& crops_end_val = *crops_end;
             constexpr auto is_invalid_crop = cmp::Less<int64_t>(0);
             NODE_VALIDATION_CHECK(op,
                                   std::none_of(begin(crops_begin_val), end(crops_begin_val), is_invalid_crop) &&
@@ -112,20 +117,12 @@ std::vector<TShape> shape_infer(const BatchToSpace* op,
         } else {
             out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, Dimension::dynamic());
         }
-        return {out_shape};
     } else {
-        return {PartialShape::dynamic()};
+        out_shape = PartialShape::dynamic();
     }
-}
 
-template <class TShape>
-void shape_infer(const ov::op::v1::BatchToSpace* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = shape_infer(op, input_shapes, constant_data);
+    return output_shapes;
 }
 } // namespace v1
 } // namespace op
 } // namespace ov
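
As a concrete check of what this computes (numbers chosen for illustration): for data shape [8, 4, 3], block_shape [1, 2, 2], crops_begin [0, 0, 1], and crops_end [0, 0, 0], the batch dimension becomes 8 / (1 * 2 * 2) = 2 and each spatial dimension becomes dim * block - crops_begin - crops_end, giving [2, 4 * 2 - 0 - 0, 3 * 2 - 1 - 0] = [2, 8, 5]. When `blocks`, `crops_begin`, or `crops_end` cannot be fetched as constants through the accessor, the corresponding output dimensions are left dynamic instead.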


@@ -15,11 +15,11 @@ namespace ov {
 namespace op {
 namespace util {
-template <typename T>
+template <typename T, class TRShape = result_shape_t<T>>
 void validate_target_shape_none(const ov::Node* op,
                                 const T& arg_shape,
                                 const AxisVector& axes_mapping_val,
-                                const T& target_input_shape) {
+                                const TRShape& target_input_shape) {
     if (arg_shape.rank().is_static() && target_input_shape.rank().is_static()) {
         const auto target_rank_length = target_input_shape.size();
         // axes_mapping needs to be in sorted order
@@ -62,8 +62,8 @@ void validate_target_shape_none(const ov::Node* op,
     }
 }
 
-template <typename T>
-void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const T& target_input_shape) {
+template <typename T, class TRShape = result_shape_t<T>>
+void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const TRShape& target_input_shape) {
     if (arg_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) {
         return;
     }
@@ -91,13 +91,13 @@ void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const T
     }
 }
 
-template <typename T>
+template <typename T, class TRShape = result_shape_t<T>>
 void set_result_shape_pdpd(const ov::Node* op,
                            const T& arg0_shape,
-                           const T& target_input_shape,
-                           T& result_shape,
+                           const TRShape& target_input_shape,
+                           TRShape& result_shape,
                            const ov::op::BroadcastModeSpec& broadcast_spec) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
+    using DimType = typename T::value_type;
     if (arg0_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) {
         result_shape = PartialShape::dynamic(target_input_shape.rank());
         return;
@@ -126,14 +126,17 @@ void set_result_shape_pdpd(const ov::Node* op,
     }
 }
 
-template <typename T>
-void set_result_shape_bidirectional(const ov::Node* op, const T& arg_shape, T& target_input_shape, T& result_shape) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
+template <typename T, class TRShape = result_shape_t<T>>
+void set_result_shape_bidirectional(const ov::Node* op,
+                                    const T& arg_shape,
+                                    TRShape& target_input_shape,
+                                    TRShape& result_shape) {
+    using DimType = typename T::value_type;
     if (arg_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) {
         result_shape = PartialShape::dynamic();
         return;
     }
-    auto arg_shape_vec = arg_shape;
+    auto arg_shape_vec = static_cast<TRShape>(arg_shape);
 
     // Add left padding to the shape with smaller rank, if the ranks are not equal
     if (arg_shape_vec.size() < target_input_shape.size()) {
@@ -154,12 +157,10 @@ void set_result_shape_bidirectional(const ov::Node* op, const T& arg_shape, T& t
     }
 }
 
-template <class T>
-void broadcast_base_shape_infer(
-    const ov::op::util::BroadcastBase* op,
-    const std::vector<T>& input_shapes,
-    std::vector<T>& output_shapes,
-    const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> broadcast_base_shape_infer(const ov::op::util::BroadcastBase* op,
+                                                const std::vector<T>& input_shapes,
+                                                const ITensorAccessor& ta = make_tensor_accessor()) {
     // shape node should produce a one dimensional shape.
     auto broadcast_shape_rank = input_shapes[1].rank();
     NODE_VALIDATION_CHECK(op,
@@ -177,35 +178,35 @@ void broadcast_base_shape_infer(
                               axes_shape_rank);
     }
 
+    auto output_shapes = std::vector<TRShape>(1);
     auto& result_shape = output_shapes[0];
     const auto& data_input_shape = input_shapes[0];
     const auto& target_input_shape = input_shapes[1];
     const bool is_target_input_shape_static = target_input_shape.is_static();
 
-    T target_as_shape;
-    bool is_target_shape_defined = get_data_as_shape<T>(1, op, target_as_shape, constant_data);
-    if (!is_target_shape_defined) {
+    auto target_as_shape = get_input_const_data_as_shape<TRShape>(op, 1, ta);
+    if (!target_as_shape) {
         if (auto concat = ov::as_type_ptr<ov::opset1::Concat>(op->get_input_node_shared_ptr(1))) {
             const auto concat_inputs = concat->input_values();
 
             if (concat->get_output_partial_shape(0).is_static() && concat->get_shape().size() == 1 &&
                 concat_inputs.size() == shape_size(concat->get_shape())) {
+                target_as_shape.emplace();
                 for (const auto& concat_input : concat_inputs) {
                     auto source_node_ptr = concat_input.get_node_shared_ptr();
                     if (auto source_const_ptr = ov::as_type_ptr<ov::opset1::Constant>(source_node_ptr)) {
-                        target_as_shape.push_back(source_const_ptr->get_axis_vector_val()[0]);
+                        target_as_shape->push_back(source_const_ptr->get_axis_vector_val()[0]);
                     } else {
-                        target_as_shape.push_back(Dimension::dynamic());
+                        target_as_shape->push_back(Dimension::dynamic());
                     }
                 }
-                is_target_shape_defined = true;
             }
         }
     }
 
     if (mode.m_type == BroadcastType::NONE) {
-        if (is_target_shape_defined) {
-            result_shape = target_as_shape;
+        if (target_as_shape) {
+            result_shape = *target_as_shape;
         } else if (is_target_input_shape_static) {
             result_shape = PartialShape::dynamic(target_input_shape[0].get_length());
         } else {
@@ -223,54 +224,54 @@ void broadcast_base_shape_infer(
                               axes_shape,
                               " doesn't match rank of input tensor ",
                               input_rank);
 
-            std::vector<int64_t> axes_mapping_val;
-            if (is_target_shape_defined && get_data_as_int64<T>(2, op, axes_mapping_val, constant_data)) {
-                AxisVector axes_mapping =
-                    AxisVector(std::vector<size_t>(axes_mapping_val.begin(), axes_mapping_val.end()));
-                validate_target_shape_none(op, data_input_shape, axes_mapping, target_as_shape);
+            if (target_as_shape) {
+                if (auto axes_mapping = get_input_const_data_as<T, size_t, AxisVector>(op, 2, ta)) {
+                    validate_target_shape_none(op, data_input_shape, *axes_mapping, *target_as_shape);
+                }
             }
         }
     } else if (mode.m_type == BroadcastType::NUMPY) {
-        if (is_target_shape_defined) {
-            result_shape = target_as_shape;
-            validate_target_shape_numpy(op, data_input_shape, target_as_shape);
+        if (target_as_shape) {
+            result_shape = *target_as_shape;
+            validate_target_shape_numpy(op, data_input_shape, *target_as_shape);
         } else if (is_target_input_shape_static) {
             result_shape = PartialShape::dynamic(target_input_shape[0].get_length());
         } else {
             result_shape = PartialShape::dynamic();
         }
     } else if (mode.m_type == BroadcastType::PDPD) {
-        if (is_target_shape_defined) {
-            set_result_shape_pdpd(op, data_input_shape, target_as_shape, result_shape, mode);
+        if (target_as_shape) {
+            set_result_shape_pdpd(op, data_input_shape, *target_as_shape, result_shape, mode);
         } else if (is_target_input_shape_static) {
             result_shape = PartialShape::dynamic(target_input_shape[0].get_length());
         } else {
             result_shape = PartialShape::dynamic();
         }
     } else if (mode.m_type == BroadcastType::BIDIRECTIONAL) {
-        if (is_target_shape_defined) {
-            set_result_shape_bidirectional(op, data_input_shape, target_as_shape, result_shape);
+        if (target_as_shape) {
+            set_result_shape_bidirectional(op, data_input_shape, *target_as_shape, result_shape);
        } else if (data_input_shape.rank().is_static() && is_target_input_shape_static) {
            result_shape = PartialShape::dynamic(target_input_shape[0].get_length());
            // The logic of BroadcastType::BIDIRECTIONAL matches broadcast_merge_into with AutoBroadcastType::NUMPY
-            NODE_VALIDATION_CHECK(op,
-                                  T::broadcast_merge_into(result_shape, data_input_shape, op::AutoBroadcastType::NUMPY),
-                                  "Broadcast shape inference failed, output shape calculation with "
-                                  "'broadcast_merge_into' was unsuccessful.");
+            NODE_VALIDATION_CHECK(
+                op,
+                TRShape::broadcast_merge_into(result_shape, data_input_shape, op::AutoBroadcastType::NUMPY),
+                "Broadcast shape inference failed, output shape calculation with "
+                "'broadcast_merge_into' was unsuccessful.");
        } else {
            result_shape = PartialShape::dynamic();
        }
    }
+    return output_shapes;
 }
 } // namespace util
 
 namespace v3 {
-template <class T>
-void shape_infer(const ov::op::v3::Broadcast* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
-    NODE_VALIDATION_CHECK(op, output_shapes.size() == 1);
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const ov::op::v3::Broadcast* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
     auto& mode = op->get_broadcast_spec();
     if (mode.m_type == BroadcastType::NONE) {
         NODE_VALIDATION_CHECK(op,
@@ -281,19 +282,18 @@ void shape_infer(const ov::op::v3::Broadcast* op,
                               input_shapes.size() == 2,
                               "axes_mapping input should not be provided for mode other than explicit");
     }
-    broadcast_base_shape_infer(op, input_shapes, output_shapes, constant_data);
+    return broadcast_base_shape_infer(op, input_shapes, ta);
 }
 } // namespace v3
 
 namespace v1 {
-template <class T>
-void shape_infer(const ov::op::v1::Broadcast* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
-    NODE_VALIDATION_CHECK(op, output_shapes.size() == 1 && (input_shapes.size() == 2 || input_shapes.size() == 3));
-    broadcast_base_shape_infer(op, input_shapes, output_shapes, constant_data);
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const ov::op::v1::Broadcast* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
+    NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3));
+    return broadcast_base_shape_infer(op, input_shapes, ta);
 }
 } // namespace v1


@@ -4,14 +4,15 @@
 #pragma once
 
-#include <openvino/op/bucketize.hpp>
+#include "openvino/op/bucketize.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
 namespace v3 {
-template <class TShape>
-std::vector<TShape> shape_infer(const Bucketize* op, const std::vector<TShape>& input_shapes) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const Bucketize* op, const std::vector<TShape>& input_shapes) {
     NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2));
 
     const auto& data_shape = input_shapes[0];
@@ -23,11 +24,6 @@ std::vector<TShape> shape_infer(const Bucketize* op, const std::vector<TShape>&
                           buckets_shape);
 
     return {data_shape};
 }
-
-template <class TShape>
-void shape_infer(const Bucketize* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
-    output_shapes = shape_infer(op, input_shapes);
-}
 } // namespace v3
 } // namespace op
 } // namespace ov


@@ -6,6 +6,7 @@
 #include <type_traits>
 
+#include "openvino/core/type/bfloat16.hpp"
 #include "openvino/core/type/float16.hpp"
 
 namespace ov {
@@ -108,9 +109,10 @@
  */
 template <class T,
           class U,
-          typename std::enable_if<((std::is_signed<T>::value || std::is_same<T, float16>::value) &&
-                                   (std::is_signed<U>::value || std::is_same<U, float16>::value)) ||
-                                  (std::is_unsigned<T>::value && std::is_unsigned<U>::value)>::type* = nullptr>
+          typename std::enable_if<
+              ((std::is_signed<T>::value || std::is_same<T, float16>::value || std::is_same<T, bfloat16>::value) &&
+               (std::is_signed<U>::value || std::is_same<U, float16>::value || std::is_same<U, bfloat16>::value)) ||
+              (std::is_unsigned<T>::value && std::is_unsigned<U>::value)>::type* = nullptr>
 constexpr bool lt(T a, U b) noexcept {
     return a < b;
 }
@@ -125,7 +127,8 @@ constexpr bool lt(T a, U b) noexcept {
 template <class T,
           class U,
-          typename std::enable_if<(std::is_floating_point<T>::value || std::is_same<T, float16>::value) &&
+          typename std::enable_if<(std::is_floating_point<T>::value || std::is_same<T, float16>::value ||
+                                   std::is_same<T, bfloat16>::value) &&
                                   std::is_unsigned<U>::value>::type* = nullptr>
 constexpr bool lt(T a, U b) noexcept {
     return a < 0 ? true : a < b;
@@ -141,8 +144,9 @@ constexpr bool lt(T a, U b) noexcept {
 template <class T,
           class U,
-          typename std::enable_if<std::is_unsigned<T>::value && (std::is_floating_point<U>::value ||
-                                                                 std::is_same<U, float16>::value)>::type* = nullptr>
+          typename std::enable_if<std::is_unsigned<T>::value &&
+                                  (std::is_floating_point<U>::value || std::is_same<U, float16>::value ||
+                                   std::is_same<U, bfloat16>::value)>::type* = nullptr>
 constexpr bool lt(T a, U b) noexcept {
     return b < 0 ? false : a < b;
 }
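
For context on why these guarded overloads exist at all: the built-in `<` converts a signed operand to unsigned when the usual arithmetic conversions apply, so `-1 < 1u` is false, while `cmp::lt` compares the values mathematically; the hunks above merely teach the trait checks that `bfloat16`, like `float16`, behaves as a signed type. A small sketch of the behavior (assuming the `ov::cmp` namespace this header defines):

#include <iostream>
// plus the compare.hpp header shown above, for ov::cmp::lt

int main() {
    std::cout << (-1 < 1u) << '\n';            // prints 0: -1 wraps to a huge unsigned value
    std::cout << ov::cmp::lt(-1, 1u) << '\n';  // prints 1: sign mismatch handled explicitly
}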


@@ -12,14 +12,15 @@ namespace ov {
 namespace op {
 namespace v0 {
 
-template <class T>
-void shape_infer(const Concat* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const Concat* op, const std::vector<T>& input_shapes) {
+    using DimType = typename T::value_type;
 
     const auto concat_axis = op->get_concatenation_axis();
     const auto empty_dim = DimType{};
 
     auto concat_dim = DimType{0};
+    auto output_shapes = std::vector<TRShape>(1);
     auto& output_shape = output_shapes.front();
 
     if (std::is_same<T, PartialShape>::value) {
@@ -29,16 +30,16 @@ void shape_infer(const Concat* op, const std::vector<T>& input_shapes, std::vect
         output_shape[concat_axis] = empty_dim;
     }
 
-    for (auto input : input_shapes) {
+    for (auto& input : input_shapes) {
         if (input.rank().is_static()) {
-            concat_dim += input[concat_axis];
-            input[concat_axis] = empty_dim;
+            auto in_copy = TRShape(input);
+            concat_dim += in_copy[concat_axis];
+            in_copy[concat_axis] = empty_dim;
             NODE_VALIDATION_CHECK(op,
-                                  T::merge_into(output_shape, input),
+                                  TRShape::merge_into(output_shape, in_copy),
                                   "Argument shapes are inconsistent; they must have the same rank, and must "
-                                  "have ",
-                                  "equal dimension everywhere except on the concatenation axis (axis ",
+                                  "have equal dimension everywhere except on the concatenation axis (axis ",
                                   concat_axis,
                                   ").");
         } else {
@@ -49,6 +50,7 @@ void shape_infer(const Concat* op, const std::vector<T>& input_shapes, std::vect
     if (output_shape.rank().is_static()) {
         output_shape[concat_axis] = concat_dim;
     }
+    return output_shapes;
 }
 } // namespace v0
 } // namespace op


@@ -11,43 +11,46 @@
 namespace ov {
 namespace op {
 namespace v1 {
-template <class TShape>
-std::vector<TShape> shape_infer(const ConvolutionBackpropData* op,
-                                const std::vector<TShape>& input_shapes,
-                                CoordinateDiff& pads_begin,
-                                CoordinateDiff& pads_end,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const ConvolutionBackpropData* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 CoordinateDiff& pads_begin,
+                                 CoordinateDiff& pads_end,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
     const auto inputs_count = input_shapes.size();
     const auto has_spatial_shape = inputs_count >= 3;
     NODE_VALIDATION_CHECK(op, inputs_count >= 2);
     using namespace ov::util;
 
-    TShape out_spatial_shape;
+    ov::optional<TRShape> out_spatial_shape;
     if (has_spatial_shape) {
         const auto& spatial_shape = input_shapes[2];
         NODE_VALIDATION_CHECK(op,
                               spatial_shape.rank().compatible(1),
                               "Input delivering output shape must have rank 1.");
 
-        if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) {
+        out_spatial_shape = get_input_const_data_as_shape<TRShape>(op, 2, ta);
+        if (!out_spatial_shape) {
             if (spatial_shape.is_static()) {
-                out_spatial_shape.resize(spatial_shape[0].get_length());
+                out_spatial_shape.emplace();
+                out_spatial_shape->resize(spatial_shape[0].get_length());
             } else {
                 out_spatial_shape = PartialShape::dynamic();
             }
         }
+    } else {
+        out_spatial_shape.emplace();
     }
 
-    const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape);
+    const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, *out_spatial_shape);
 
-    TShape output_shape;
+    TRShape output_shape;
     if (num_spatial != convolution::num_spatial_undefined) {
         const auto& data_shape = input_shapes[0];
         const auto& filters_shape = input_shapes[1];
 
         NODE_VALIDATION_CHECK(
             op,
-            !has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial,
+            !has_spatial_shape || out_spatial_shape->rank().is_dynamic() || out_spatial_shape->size() == num_spatial,
             "Output shape should be defined for all and only spatial dimensions.");
 
         convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
@@ -56,7 +59,7 @@ std::vector<TShape> shape_infer(const ConvolutionBackpropData* op,
             convolution::validate::data_shape(op, data_shape);
             convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
         }
-        convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end);
+        convolution::apply_padding(op, input_shapes, *out_spatial_shape, pads_begin, pads_end);
 
         output_shape.reserve(util::spatial_dim_offset + num_spatial);
         output_shape.emplace_back(data_shape.rank().is_static() ? data_shape[0] : dim::inf_bound);
@@ -64,8 +67,8 @@ std::vector<TShape> shape_infer(const ConvolutionBackpropData* op,
         if (has_spatial_shape) {
             output_shape.insert(output_shape.end(),
-                                std::make_move_iterator(out_spatial_shape.begin()),
-                                std::make_move_iterator(out_spatial_shape.end()));
+                                std::make_move_iterator(out_spatial_shape->begin()),
+                                std::make_move_iterator(out_spatial_shape->end()));
         } else {
             convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);
         }
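
The `ov::optional` used above is the helper the commit message introduces for holding an optional shape-inference result; it lets `get_input_const_data_as_shape` signal "no constant available" without the old bool-plus-out-parameter pairing. The shape of the pattern, extracted from the hunk (`rank` and `apply` are illustrative placeholders):

ov::optional<TRShape> out_spatial_shape = get_input_const_data_as_shape<TRShape>(op, 2, ta);
if (!out_spatial_shape) {           // falsy while no value is held
    out_spatial_shape.emplace();    // construct an empty shape in place
    out_spatial_shape->resize(rank);
}
apply(*out_spatial_shape);          // dereference like a pointer once engaged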


@@ -40,7 +40,9 @@ void filter_shape(const ov::op::util::ConvolutionBackPropBase* op,
 template <class TOp,
           class TShape,
           typename std::enable_if<std::is_base_of<util::ConvolutionBackPropBase, TOp>::value>::type* = nullptr>
-size_t calculate_num_spatial(const TOp* op, const std::vector<TShape>& input_shapes, const TShape& out_spatial_shape) {
+size_t calculate_num_spatial(const TOp* op,
+                             const std::vector<TShape>& input_shapes,
+                             const result_shape_t<TShape>& out_spatial_shape) {
     NODE_VALIDATION_CHECK(op, input_shapes.size() > 1);
 
     auto num_spatial = util::get_num_spatial(op);
@@ -77,7 +79,7 @@ template <class TOp, class TShape, class TIter>
 void apply_auto_pad(const TOp* op,
                     const TShape& data_shape,
                     const TShape& filters_shape,
-                    const TShape& out_spatial_shape,
+                    const result_shape_t<TShape>& out_spatial_shape,
                     TIter pads_begin,
                     TIter pads_end) {
     const auto& strides = op->get_strides();
@@ -94,12 +96,12 @@ void apply_auto_pad(const TOp* op,
     for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++filter_dim) {
         using namespace ov::util;
-        if (data_dim->is_static() && filter_dim->is_static() && out_spatial_shape[i].is_static()) {
+        if (dim::is_static(*data_dim) && dim::is_static(*filter_dim) && out_spatial_shape[i].is_static()) {
             const auto dilated_filter = dim::dilated(*filter_dim, dilations[i]);
-            const auto dim_len = static_cast<int64_t>(data_dim->get_length() - 1);
-            const auto padding = std::max<int64_t>(
-                dim_len * strides[i] + dilated_filter.get_length() - out_spatial_shape[i].get_length() + out_padding[i],
-                0);
+            const auto dim_len = static_cast<int64_t>(dim::get_length(*data_dim) - 1);
+            const auto padding = std::max<int64_t>(dim_len * strides[i] + dim::get_length(dilated_filter) -
+                                                       out_spatial_shape[i].get_length() + out_padding[i],
+                                                   0);
             *pad_b = padding / 2;
             *pad_e = padding - *pad_b;
@@ -122,7 +124,7 @@ void apply_auto_pad(const TOp* op,
 template <class TShape>
 void apply_padding(const util::ConvolutionBackPropBase* op,
                    const std::vector<TShape>& input_shapes,
-                   const TShape& out_spatial_shape,
+                   const result_shape_t<TShape>& out_spatial_shape,
                    CoordinateDiff& pads_begin,
                    CoordinateDiff& pads_end) {
     const auto& data_shape = input_shapes[0];
@@ -165,7 +167,7 @@ void append_spatial_shape(const TOp* op,
                           const TShape& filters_shape,
                           const TContainer& pads_begin,
                           const TContainer& pads_end,
-                          TShape& out_shape) {
+                          result_shape_t<TShape>& out_shape) {
     using namespace ov::util;
 
     const auto& strides = op->get_strides();

@@ -10,18 +10,21 @@
 namespace ov {
 namespace op {
 namespace v1 {
-template <class TFrowardConv, class TShape, class TContainer>
-std::vector<TShape> shape_infer(const TFrowardConv* op,
-                                const std::vector<TShape>& input_shapes,
-                                TContainer& pads_begin,
-                                TContainer& pads_end,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TOp,
+          class TShape,
+          class TRShape = result_shape_t<TShape>,
+          typename std::enable_if<std::is_same<TOp, Convolution>::value ||
+                                  std::is_same<TOp, BinaryConvolution>::value>::type* = nullptr>
+std::vector<TRShape> shape_infer(const TOp* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 CoordinateDiff& pads_begin,
+                                 CoordinateDiff& pads_end) {
     NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2);
     using namespace ov::util;
 
     const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes);
 
-    TShape output_shape;
+    TRShape output_shape;
     if (num_spatial != util::num_spatial_undefined) {
         const auto& data_shape = input_shapes[0];
         const auto& filters_shape = input_shapes[1];


@@ -189,8 +189,8 @@ void apply_auto_pad(const TOp* op,
     for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++kernel_dim) {
         using namespace ov::util;
-        if (kernel_dim->is_static()) {
-            std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, kernel_dim->get_length(), dilations[i], strides[i]);
+        if (dim::is_static(*kernel_dim)) {
+            std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, dim::get_length(*kernel_dim), dilations[i], strides[i]);
         } else {
             *pad_b = 0;
             *pad_e = 0;
@@ -241,6 +241,7 @@ void apply_padding(const TOp* op,
  */
 template <class TOp,
           class TShape,
+          class TRShape = result_shape_t<TShape>,
           typename std::enable_if<std::is_base_of<util::ConvolutionFwdPropBase, TOp>::value ||
                                   std::is_base_of<util::DeformableConvolutionBase, TOp>::value>::type* = nullptr>
 void append_spatial_shape(const TOp* op,
@@ -248,7 +249,7 @@ void append_spatial_shape(const TOp* op,
                           const TShape& filters_shape,
                           CoordinateDiff& pads_begin,
                           CoordinateDiff& pads_end,
-                          TShape& out_shape) {
+                          TRShape& out_shape) {
     using namespace ov::util;
     using TDim = typename TShape::value_type;
@@ -266,8 +267,8 @@ void append_spatial_shape(const TOp* op,
     const auto& dilations = op->get_dilations();
 
     for (size_t i = 0; i < spatial_num; ++i, ++data_dim, ++filters_dim) {
-        auto dim = *data_dim + (pads_begin[i] + pads_end[i]);
-        const auto filter_dilated = dim::dilated(*filters_dim, dilations[i]);
+        TDim dim = *data_dim + (pads_begin[i] + pads_end[i]);
+        const TDim filter_dilated = dim::dilated(*filters_dim, dilations[i]);
         if (dim.is_static() && filter_dilated.is_static()) {
             // Use check from pooling op as it is same.

@@ -0,0 +1,17 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "utils.hpp"
+
+namespace ov {
+namespace op {
+
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> copy_shape_infer(const Node* op, const std::vector<TShape>& input_shapes) {
+    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1, "Incorrect number of input shapes");
+    return {input_shapes[0]};
+}
+} // namespace op
+} // namespace ov
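
This new helper is what the Assign shape inference earlier in the diff delegates to: a single-input pass-through op can express its whole shape propagation as `auto output_shapes = copy_shape_infer(op, input_shapes);`, which is also why the per-op `friend void shape_infer(...)` declarations disappear from the class headers above.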


@@ -3,13 +3,14 @@
 //
 #pragma once
 
 #include "openvino/op/ctc_greedy_decoder_seq_len.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
 namespace v6 {
-template <class TShape>
-std::vector<TShape> shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vector<TShape>& input_shapes) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vector<TShape>& input_shapes) {
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 || input_shapes.size() == 3);
     using DimType = typename TShape::value_type;
@@ -41,14 +42,7 @@ std::vector<TShape> shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vec
                               "The first dimensions of input tensors must match.");
     }
 
-    return {TShape{batch_size, time_size}, TShape{batch_size}};
-}
-
-template <class TShape>
-void shape_infer(const CTCGreedyDecoderSeqLen* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes) {
-    output_shapes = shape_infer(op, input_shapes);
+    return {TRShape{batch_size, time_size}, TRShape{batch_size}};
 }
 } // namespace v6
 } // namespace op


@@ -2,14 +2,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 #pragma once
 
-#include <openvino/op/ctc_greedy_decoder.hpp>
+#include "openvino/op/ctc_greedy_decoder.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
 namespace v0 {
-template <class TShape>
-std::vector<TShape> shape_infer(const CTCGreedyDecoder* op, const std::vector<TShape>& input_shapes) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const CTCGreedyDecoder* op, const std::vector<TShape>& input_shapes) {
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
     using DimType = typename TShape::value_type;
@@ -46,14 +47,7 @@ std::vector<TShape> shape_infer(const CTCGreedyDecoder* op, const std::vector<TS
                               DimType::merge(batch_size, batch_size, seq_mask_pshape[1]),
                               "The second dimensions of input tensors must match.");
     }
 
-    return {TShape(std::move(output_dims))};
-}
-
-template <class TShape>
-void shape_infer(const CTCGreedyDecoder* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes) {
-    output_shapes = shape_infer(op, input_shapes);
+    return {TRShape(std::move(output_dims))};
 }
 } // namespace v0
 } // namespace op


@@ -4,7 +4,9 @@
 #pragma once
 
 #include <array>
-#include <openvino/op/ctc_loss.hpp>
+
+#include "openvino/op/ctc_loss.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
@@ -16,8 +18,8 @@ constexpr auto shape_names =
 constexpr auto shape_ranks = std::array<int64_t, 4>{3, 1, 2, 1};
 } // namespace ctc_loss
 
-template <class TShape>
-std::vector<TShape> shape_infer(const CTCLoss* op, const std::vector<TShape>& input_shapes) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const CTCLoss* op, const std::vector<TShape>& input_shapes) {
     using DimType = typename TShape::value_type;
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 || input_shapes.size() == 5);
@@ -89,14 +91,8 @@ std::vector<TShape> shape_infer(const CTCLoss* op, const std::vector<TShape>& in
                           " and: ",
                           batch_size);
 
-    return {TShape{batch_size}};
-}
-
-template <class TShape>
-void shape_infer(const CTCLoss* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
-    output_shapes = shape_infer(op, input_shapes);
+    return {TRShape{batch_size}};
 }
 } // namespace v4
 } // namespace op
 } // namespace ov


@ -63,19 +63,18 @@ void deformable_group_divisible_dimension(const TDeformableConv* op, const TDim&
} // namespace deformable_conv } // namespace deformable_conv
namespace util { namespace util {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const DeformableConvolutionBase* op, std::vector<TRShape> shape_infer(const DeformableConvolutionBase* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
CoordinateDiff& pads_begin, CoordinateDiff& pads_begin,
CoordinateDiff& pads_end, CoordinateDiff& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
static constexpr std::array<const char*, 4> names{"Input", "Offsets", "Filters", "Mask"}; static constexpr std::array<const char*, 4> names{"Input", "Offsets", "Filters", "Mask"};
using namespace ov::util; using namespace ov::util;
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
const auto num_spatial = deformable_conv::calculate_num_spatial(op, input_shapes); const auto num_spatial = deformable_conv::calculate_num_spatial(op, input_shapes);
TShape output_shape; TRShape output_shape;
if (num_spatial != convolution::num_spatial_undefined) { if (num_spatial != convolution::num_spatial_undefined) {
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
const auto& offsets_shape = input_shapes[1]; const auto& offsets_shape = input_shapes[1];
@ -168,24 +167,22 @@ std::vector<TShape> shape_infer(const DeformableConvolutionBase* op,
} // namespace util } // namespace util
namespace v1 { namespace v1 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const DeformableConvolution* op, std::vector<TRShape> shape_infer(const DeformableConvolution* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
CoordinateDiff& pads_begin, CoordinateDiff& pads_begin,
CoordinateDiff& pads_end, CoordinateDiff& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
return util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data); return util::shape_infer(op, input_shapes, pads_begin, pads_end);
} }
} // namespace v1 } // namespace v1
namespace v8 { namespace v8 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const DeformableConvolution* op, std::vector<TRShape> shape_infer(const DeformableConvolution* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
CoordinateDiff& pads_begin, CoordinateDiff& pads_begin,
CoordinateDiff& pads_end, CoordinateDiff& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
const auto has_mask_shape = input_shapes.size() == 4; const auto has_mask_shape = input_shapes.size() == 4;
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || has_mask_shape); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || has_mask_shape);
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
@ -227,7 +224,7 @@ std::vector<TShape> shape_infer(const DeformableConvolution* op,
} }
} }
auto output_shapes = util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data); auto output_shapes = util::shape_infer(op, input_shapes, pads_begin, pads_end);
// post infer checks // post infer checks
if (has_mask_shape && input_shapes[3].rank().is_static() && output_shapes[0].rank().is_static()) { if (has_mask_shape && input_shapes[3].rank().is_static() && output_shapes[0].rank().is_static()) {
auto mask_dim = input_shapes[3].begin() + util::spatial_dim_offset; auto mask_dim = input_shapes[3].begin() + util::spatial_dim_offset;
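Caller-side, the migrated interface looks as follows; a minimal sketch, assuming a constructed v8 DeformableConvolution node and the deformable_convolution_shape_inference.hpp header on the include path. Output shapes are now returned by value, while pads_begin/pads_end are still filled in place for auto-pad modes:

#include <vector>
#include "deformable_convolution_shape_inference.hpp"

std::vector<ov::PartialShape> infer_deformable_conv(const ov::op::v8::DeformableConvolution* op,
                                                    const std::vector<ov::PartialShape>& input_shapes) {
    ov::CoordinateDiff pads_begin = op->get_pads_begin();  // updated in place by shape_infer
    ov::CoordinateDiff pads_end = op->get_pads_end();
    // New convention: the result is returned instead of written to an out-parameter.
    return ov::op::v8::shape_infer(op, input_shapes, pads_begin, pads_end);
}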

View File

@ -3,13 +3,14 @@
// //
#pragma once #pragma once
#include "openvino/op/deformable_psroi_pooling.hpp" #include "openvino/op/deformable_psroi_pooling.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v1 { namespace v1 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const DeformablePSROIPooling* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const DeformablePSROIPooling* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 || input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 || input_shapes.size() == 3);
const auto& input_pshape = input_shapes[0]; const auto& input_pshape = input_shapes[0];
@ -42,17 +43,10 @@ std::vector<TShape> shape_infer(const DeformablePSROIPooling* op, const std::vec
using DimType = typename TShape::value_type; using DimType = typename TShape::value_type;
using DimTypeVal = typename DimType::value_type; using DimTypeVal = typename DimType::value_type;
// The output shape: [num_rois, output_dim, group_size, group_size] // The output shape: [num_rois, output_dim, group_size, group_size]
return {TShape{box_coords_pshape.rank().is_static() ? box_coords_pshape[0] : DimType{}, return {TRShape{box_coords_pshape.rank().is_static() ? box_coords_pshape[0] : DimType{},
static_cast<DimTypeVal>(op->get_output_dim()), static_cast<DimTypeVal>(op->get_output_dim()),
static_cast<DimTypeVal>(op->get_group_size()), static_cast<DimTypeVal>(op->get_group_size()),
static_cast<DimTypeVal>(op->get_group_size())}}; static_cast<DimTypeVal>(op->get_group_size())}};
}
template <class TShape>
void shape_infer(const DeformablePSROIPooling* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
} }
} // namespace v1 } // namespace v1
} // namespace op } // namespace op

View File

@ -14,10 +14,10 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const DepthToSpace* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const DepthToSpace* op, const std::vector<TShape>& input_shapes) {
using TDim = typename TShape::value_type; using TDim = typename std::iterator_traits<typename TShape::iterator>::value_type;
using TVal = typename TDim::value_type; using TVal = typename TShape::value_type::value_type;
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
@ -34,7 +34,7 @@ std::vector<TShape> shape_infer(const DepthToSpace* op, const std::vector<TShape
const auto divisor = static_cast<TVal>(std::pow(block_size, data_shape.size() - spatial_dim_offset)); const auto divisor = static_cast<TVal>(std::pow(block_size, data_shape.size() - spatial_dim_offset));
NODE_VALIDATION_CHECK(op, divisor != 0, "DepthToSpace: The divisor must not be 0"); NODE_VALIDATION_CHECK(op, divisor != 0, "DepthToSpace: The divisor must not be 0");
auto out_shape = data_shape; auto out_shape = TRShape(data_shape);
out_shape[1] /= divisor; out_shape[1] /= divisor;
check_divided_result(op, out_shape[1], data_shape[1], divisor); check_divided_result(op, out_shape[1], data_shape[1], divisor);
std::for_each(out_shape.begin() + spatial_dim_offset, out_shape.end(), [&block_size](TDim& d) { std::for_each(out_shape.begin() + spatial_dim_offset, out_shape.end(), [&block_size](TDim& d) {
@ -45,12 +45,6 @@ std::vector<TShape> shape_infer(const DepthToSpace* op, const std::vector<TShape
return {PartialShape::dynamic()}; return {PartialShape::dynamic()};
} }
} }
template <class TShape>
void shape_infer(const DepthToSpace* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
} // namespace ov } // namespace ov
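A worked example of the divisor arithmetic above; a minimal sketch, assuming shape inference runs on node construction: for a 4D input the channel dimension is divided by block_size^(rank - 2) and every spatial dimension is multiplied by block_size.

#include <memory>
#include "openvino/op/depth_to_space.hpp"
#include "openvino/op/parameter.hpp"

int main() {
    // data [1, 16, 8, 8] with block_size = 2: divisor = 2^(4 - 2) = 4,
    // so the inferred output shape is [1, 16 / 4, 8 * 2, 8 * 2] = [1, 4, 16, 16].
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 16, 8, 8});
    const auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(
        data, ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
    return d2s->get_output_partial_shape(0) == ov::PartialShape{1, 4, 16, 16} ? 0 : 1;
}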

View File

@ -6,6 +6,8 @@
#include <openvino/op/detection_output.hpp> #include <openvino/op/detection_output.hpp>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace util { namespace util {
@ -118,17 +120,17 @@ void compute_num_classes(const DetectionOutputBase* op,
} }
} }
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer_base(const DetectionOutputBase* op, std::vector<TRShape> shape_infer_base(const DetectionOutputBase* op,
const DetectionOutputBase::AttributesBase& attrs, const DetectionOutputBase::AttributesBase& attrs,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, int64_t attribute_num_classes) {
int64_t attribute_num_classes) { using dim_t = typename T::value_type;
using dim_t = typename std::iterator_traits<typename T::iterator>::value_type;
using val_type = typename dim_t::value_type; using val_type = typename dim_t::value_type;
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 5) && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 5));
auto output_shapes = std::vector<TRShape>(1);
auto& ret_output_shape = output_shapes[0]; auto& ret_output_shape = output_shapes[0];
ret_output_shape.resize(4); ret_output_shape.resize(4);
@ -310,6 +312,7 @@ void shape_infer_base(const DetectionOutputBase* op,
} else { } else {
ret_output_shape[2] = dim_num_images * dim_num_prior_boxes * dim_num_classes; ret_output_shape[2] = dim_num_images * dim_num_prior_boxes * dim_num_classes;
} }
return output_shapes;
} }
} // namespace util } // namespace util
@ -319,12 +322,11 @@ void shape_infer_base(const DetectionOutputBase* op,
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class T> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const DetectionOutput* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const DetectionOutput* op, const std::vector<TShape>& input_shapes) {
const auto& attrs = op->get_attrs(); const auto& attrs = op->get_attrs();
ov::op::util::shape_infer_base(op, attrs, input_shapes, output_shapes, attrs.num_classes); return ov::op::util::shape_infer_base(op, attrs, input_shapes, attrs.num_classes);
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
} // namespace ov } // namespace ov
@ -332,12 +334,11 @@ void shape_infer(const DetectionOutput* op, const std::vector<T>& input_shapes,
namespace ov { namespace ov {
namespace op { namespace op {
namespace v8 { namespace v8 {
template <class TShape, class TRShape = result_shape_t<TShape>>
template <class T> std::vector<TRShape> shape_infer(const DetectionOutput* op, const std::vector<TShape>& input_shapes) {
void shape_infer(const DetectionOutput* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { const auto& attrs = op->get_attrs();
ov::op::util::shape_infer_base(op, op->get_attrs(), input_shapes, output_shapes, -1); return ov::op::util::shape_infer_base(op, attrs, input_shapes, -1);
} }
} // namespace v8 } // namespace v8
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -12,6 +12,27 @@ namespace ov {
namespace util { namespace util {
namespace dim { namespace dim {
template <class TDim, typename std::enable_if<std::is_arithmetic<TDim>::value>::type* = nullptr>
constexpr bool is_static(const TDim) {
return true;
}
template <class TDim, typename std::enable_if<!std::is_arithmetic<TDim>::value>::type* = nullptr>
constexpr bool is_static(const TDim& d) {
return d.is_static();
}
template <class TDim>
constexpr typename std::enable_if<std::is_arithmetic<TDim>::value, TDim>::type get_length(const TDim& d) {
return d;
}
template <class TDim>
constexpr typename std::enable_if<!std::is_arithmetic<TDim>::value, typename TDim::value_type>::type get_length(
const TDim& d) {
return d.get_length();
}
constexpr int64_t inf_bound = -1; //!< Infinite bound value for dimension. constexpr int64_t inf_bound = -1; //!< Infinite bound value for dimension.
/** /**
@ -132,10 +153,12 @@ typename std::enable_if<std::is_class<TDim>::value, TDim>::type padded(const TDi
* @param stride Kernel stride. * @param stride Kernel stride.
* @return Pair of left, right padding values for input dimension. * @return Pair of left, right padding values for input dimension.
*/ */
template <class TDim, class T = typename TDim::value_type> template <
class TDim,
class T = typename std::conditional<std::is_arithmetic<TDim>::value, size_t, typename Dimension::value_type>::type>
inline std::pair<T, T> padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) { inline std::pair<T, T> padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) {
if (dim.is_static()) { if (dim::is_static(dim)) {
const auto dim_size = static_cast<int64_t>(dim.get_length()); const auto dim_size = static_cast<int64_t>(dim::get_length(dim));
const auto dilated_kernel = dilated(kernel_size, dilation); const auto dilated_kernel = dilated(kernel_size, dilation);
const int64_t tmp = (dim_size + stride - 1) / stride; const int64_t tmp = (dim_size + stride - 1) / stride;
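The is_static/get_length overloads above let the same padding code serve both ov::Dimension and the plain arithmetic dimensions used by static shapes. A self-contained sketch of the dispatch idea, in standard C++ only (FakeDim is a made-up stand-in, not an OpenVINO type):

#include <cstdint>
#include <iostream>
#include <type_traits>

struct FakeDim {  // stand-in for a class-like dimension such as ov::Dimension
    constexpr bool is_static() const { return false; }
    constexpr int64_t get_length() const { return -1; }
};

template <class TDim, typename std::enable_if<std::is_arithmetic<TDim>::value>::type* = nullptr>
constexpr bool is_static(const TDim) {
    return true;  // arithmetic dims, e.g. size_t in static shapes, are always static
}

template <class TDim, typename std::enable_if<!std::is_arithmetic<TDim>::value>::type* = nullptr>
constexpr bool is_static(const TDim& d) {
    return d.is_static();  // class-like dims answer for themselves
}

int main() {
    std::cout << is_static(size_t{5}) << ' ' << is_static(FakeDim{}) << '\n';  // prints: 1 0
}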

View File

@ -12,9 +12,9 @@ namespace ov {
namespace op { namespace op {
namespace v7 { namespace v7 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const Einsum* op, const std::vector<T>& input_shapes) {
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; using DimType = typename T::value_type;
// check that equation has correct format and extract input and output subscripts // check that equation has correct format and extract input and output subscripts
std::vector<std::string> input_subscripts; std::vector<std::string> input_subscripts;
@ -25,11 +25,10 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
input_subscripts.size() == input_shapes.size(), input_subscripts.size() == input_shapes.size(),
"Equation must contain a number of subscripts equal to a number of Einsum inputs."); "Equation must contain a number of subscripts equal to a number of Einsum inputs.");
NODE_VALIDATION_CHECK(op, output_shapes.size() == 1);
// create a dictionary with dimension sizes (or ranges in case of dynamic shapes) for each label // create a dictionary with dimension sizes (or ranges in case of dynamic shapes) for each label
// and check their compatibility in case of repeating labels // and check their compatibility in case of repeating labels
std::unordered_map<std::string, T> label_to_shape; std::unordered_map<std::string, TRShape> label_to_shape;
for (size_t input_idx = 0; input_idx < input_shapes.size(); ++input_idx) { for (size_t input_idx = 0; input_idx < input_shapes.size(); ++input_idx) {
const auto& pshape = input_shapes[input_idx]; const auto& pshape = input_shapes[input_idx];
@ -53,9 +52,9 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
if (label_to_shape.find(label) == label_to_shape.end()) { if (label_to_shape.find(label) == label_to_shape.end()) {
label_to_shape[label] = current_sub_pshape; label_to_shape[label] = current_sub_pshape;
} else { } else {
bool is_broadcast_success = T::broadcast_merge_into(label_to_shape[label], bool is_broadcast_success = TRShape::broadcast_merge_into(label_to_shape[label],
current_sub_pshape, current_sub_pshape,
op::AutoBroadcastType::NUMPY); op::AutoBroadcastType::NUMPY);
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
is_broadcast_success, is_broadcast_success,
"Input dimensions labeled with ellipsis for Einsum " "Input dimensions labeled with ellipsis for Einsum "
@ -64,13 +63,13 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
dim_ind += num_broadcasted_dims; dim_ind += num_broadcasted_dims;
} else { } else {
if (label_to_shape.find(label) == label_to_shape.end()) { if (label_to_shape.find(label) == label_to_shape.end()) {
label_to_shape[label] = T{pshape[dim_ind]}; label_to_shape[label] = TRShape{pshape[dim_ind]};
} else { } else {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
label_to_shape[label].compatible(T{pshape[label_ind]}), label_to_shape[label].compatible(TRShape{pshape[label_ind]}),
"Different input dimensions indicated by the same labels for Einsum " "Different input dimensions indicated by the same labels for Einsum "
"must be compatible."); "must be compatible.");
OPENVINO_ASSERT(T::merge_into(label_to_shape[label], T{pshape[dim_ind]})); OPENVINO_ASSERT(TRShape::merge_into(label_to_shape[label], TRShape{pshape[dim_ind]}));
} }
++dim_ind; ++dim_ind;
} }
@ -91,10 +90,9 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
// compute the output shape // compute the output shape
const auto output_labels = Einsum::extract_labels(output_subscript); const auto output_labels = Einsum::extract_labels(output_subscript);
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
output_shape.resize(0);
for (auto const& output_label : output_labels) { for (auto const& output_label : output_labels) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
label_to_shape.find(output_label) != label_to_shape.end(), label_to_shape.find(output_label) != label_to_shape.end(),
@ -104,6 +102,7 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
label_to_shape[output_label].begin(), label_to_shape[output_label].begin(),
label_to_shape[output_label].end()); label_to_shape[output_label].end());
} }
return output_shapes;
} }
} // namespace v7 } // namespace v7
} // namespace op } // namespace op
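A small illustration of the label merging above; a sketch assuming shape inference runs on construction: with equation "ij,jk->ik" the repeated label j must be compatible across inputs and is dropped from the output.

const auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3});
const auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{3, 4});
const auto einsum = std::make_shared<ov::op::v7::Einsum>(ov::OutputVector{a, b}, "ij,jk->ik");
// einsum->get_output_partial_shape(0) is [2, 4]; the 'j' dimensions (3 and 3) were merged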

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/util/binary_elementwise_arithmetic.hpp"
#include "openvino/op/util/binary_elementwise_comparison.hpp"
#include "openvino/op/util/binary_elementwise_logical.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
template <class OpType, class T, class TRShape = result_shape_t<T>>
std::vector<TRShape> eltwise_shape_infer(const OpType* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2, "Incorrect number of input/output shapes");
auto output_shapes = std::vector<TRShape>{input_shapes[0]};
auto& output_shape = output_shapes[0];
const auto& autob = op->get_autob();
if (autob.m_type == AutoBroadcastType::NONE) {
NODE_VALIDATION_CHECK(op,
TRShape::merge_into(output_shape, input_shapes[1]),
"Argument shapes are inconsistent.");
} else if (autob.m_type == AutoBroadcastType::NUMPY || autob.m_type == AutoBroadcastType::PDPD) {
NODE_VALIDATION_CHECK(op,
TRShape::broadcast_merge_into(output_shape, input_shapes[1], autob),
"Argument shapes are inconsistent.");
} else {
NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification");
}
return output_shapes;
}
} // namespace op
} // namespace ov
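A usage sketch for the new helper, assuming an Add node (which defaults to NUMPY auto-broadcast): merging [2, 1, 4] with [3, 1] yields [2, 3, 4].

const auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 1, 4});
const auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{3, 1});
const auto add = std::make_shared<ov::op::v1::Add>(a, b);
const auto out = ov::op::eltwise_shape_infer(add.get(), std::vector<ov::PartialShape>{{2, 1, 4}, {3, 1}});
// out[0] == ov::PartialShape{2, 3, 4}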

View File

@ -12,19 +12,11 @@ namespace ov {
namespace op { namespace op {
namespace v3 { namespace v3 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const EmbeddingSegmentsSum* op, std::vector<TRShape> shape_infer(const EmbeddingSegmentsSum* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
output_shapes = shape_infer(op, input_shapes, constant_data);
}
template <class TShape>
std::vector<TShape> shape_infer(
const EmbeddingSegmentsSum* op,
const std::vector<TShape>& input_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
const auto input_size = input_shapes.size(); const auto input_size = input_shapes.size();
NODE_VALIDATION_CHECK(op, input_size >= 4 && input_size <= 6); NODE_VALIDATION_CHECK(op, input_size >= 4 && input_size <= 6);
@ -58,12 +50,11 @@ std::vector<TShape> shape_infer(
"INDICES and PER_SAMPLE_WEIGHTS shape must be same."); "INDICES and PER_SAMPLE_WEIGHTS shape must be same.");
} }
const auto& emb_table_shape = input_shapes[EMB_TABLE]; const auto& emb_table_shape = input_shapes[EMB_TABLE];
TShape result_shape = emb_table_shape; TRShape result_shape = emb_table_shape;
if (emb_table_shape.rank().is_static()) { if (emb_table_shape.rank().is_static()) {
NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar."); NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar.");
TShape segments_value; if (auto segments_value = get_input_const_data_as_shape<TRShape>(op, NUM_SEGMENTS, ta)) {
if (get_data_as_shape<TShape>(NUM_SEGMENTS, op, segments_value, constant_data)) { result_shape[0] = (*segments_value)[0];
result_shape[0] = segments_value[0];
} else { } else {
result_shape[0] = Dimension::dynamic(); result_shape[0] = Dimension::dynamic();
} }

View File

@ -4,6 +4,7 @@
#pragma once #pragma once
#include "openvino/core/node.hpp" #include "openvino/core/node.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
@ -22,11 +23,11 @@ namespace embedding {
* \return The copy of the `emb_table_shape` with the first dimension overwritten by `dim_shape_src[0]` if the rank is * \return The copy of the `emb_table_shape` with the first dimension overwritten by `dim_shape_src[0]` if the rank is
* static, otherwise fully dynamic shape with dynamic rank. * static, otherwise fully dynamic shape with dynamic rank.
*/ */
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
TShape out_shape_infer(const ov::Node* op, const TShape& emb_table_shape, const TShape& dim_shape_src) { TRShape out_shape_infer(const ov::Node* op, const TShape& emb_table_shape, const TShape& dim_shape_src) {
if (emb_table_shape.rank().is_static()) { if (emb_table_shape.rank().is_static()) {
NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar."); NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar.");
auto out_shape = emb_table_shape; auto out_shape = TRShape(emb_table_shape);
out_shape[0] = dim_shape_src.rank().is_static() ? dim_shape_src[0] : Dimension::dynamic(); out_shape[0] = dim_shape_src.rank().is_static() ? dim_shape_src[0] : Dimension::dynamic();
return out_shape; return out_shape;
} }

View File

@ -13,9 +13,9 @@ namespace ov {
namespace op { namespace op {
namespace util { namespace util {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op, std::vector<TRShape> shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
const auto input_size = input_shapes.size(); const auto input_size = input_shapes.size();
NODE_VALIDATION_CHECK(op, (input_size >= 3 && input_size <= 5)); NODE_VALIDATION_CHECK(op, (input_size >= 3 && input_size <= 5));
@ -45,14 +45,6 @@ std::vector<TShape> shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op,
return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], input_shapes[OFFSETS])}; return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], input_shapes[OFFSETS])};
} }
template <class TShape>
void shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace util } // namespace util
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -13,9 +13,9 @@ namespace ov {
namespace op { namespace op {
namespace util { namespace util {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, std::vector<TRShape> shape_infer(const ov::op::util::EmbeddingBagPackedBase* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
const auto input_size = input_shapes.size(); const auto input_size = input_shapes.size();
NODE_VALIDATION_CHECK(op, input_size == 2 || input_size == 3); NODE_VALIDATION_CHECK(op, input_size == 2 || input_size == 3);
@ -23,7 +23,7 @@ std::vector<TShape> shape_infer(const ov::op::util::EmbeddingBagPackedBase* op,
constexpr size_t INDICES = 1; constexpr size_t INDICES = 1;
constexpr size_t PER_SAMPLE_WEIGHTS = 2; constexpr size_t PER_SAMPLE_WEIGHTS = 2;
auto indices_shape = input_shapes[INDICES]; auto indices_shape = TRShape(input_shapes[INDICES]);
NODE_VALIDATION_CHECK(op, indices_shape.rank().compatible(2), "INDICES must be 2D."); NODE_VALIDATION_CHECK(op, indices_shape.rank().compatible(2), "INDICES must be 2D.");
if (input_size == 3) { if (input_size == 3) {
@ -32,18 +32,10 @@ std::vector<TShape> shape_infer(const ov::op::util::EmbeddingBagPackedBase* op,
"PER_SAMPLE_WEIGHTS must be 2D."); "PER_SAMPLE_WEIGHTS must be 2D.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
TShape::merge_into(indices_shape, input_shapes[PER_SAMPLE_WEIGHTS]), TRShape::merge_into(indices_shape, input_shapes[PER_SAMPLE_WEIGHTS]),
"INDICES and PER_SAMPLE_WEIGHTS shape must be same."); "INDICES and PER_SAMPLE_WEIGHTS shape must be same.");
} }
return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], TShape(indices_shape))};
return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], indices_shape)};
}
template <class TShape>
void shape_infer(const ov::op::util::EmbeddingBagPackedBase* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
} }
} // namespace util } // namespace util
} // namespace op } // namespace op

View File

@ -5,14 +5,14 @@
#pragma once #pragma once
#include "openvino/op/experimental_detectron_detection_output.hpp" #include "openvino/op/experimental_detectron_detection_output.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v6 { namespace v6 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ExperimentalDetectronDetectionOutput* op, std::vector<TRShape> shape_infer(const ExperimentalDetectronDetectionOutput* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
@ -55,7 +55,7 @@ std::vector<TShape> shape_infer(const ExperimentalDetectronDetectionOutput* op,
} }
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
im_info_shape.compatible({1, 3}), im_info_shape.compatible(TRShape{1, 3}),
"Input image info shape must be compatible with [1,3]."); "Input image info shape must be compatible with [1,3].");
if (rois_shape_rank_is_static && deltas_shape_rank_is_static && scores_shape_is_static) { if (rois_shape_rank_is_static && deltas_shape_rank_is_static && scores_shape_is_static) {
@ -76,19 +76,11 @@ std::vector<TShape> shape_infer(const ExperimentalDetectronDetectionOutput* op,
num_batches_scores); num_batches_scores);
} }
auto output_shapes = std::vector<TShape>(3, TShape{TDim(attrs.max_detections_per_image)}); auto output_shapes = std::vector<TRShape>(3, TRShape{TDim(attrs.max_detections_per_image)});
output_shapes[0].push_back(4); output_shapes[0].push_back(4);
return output_shapes; return output_shapes;
} }
template <class TShape>
void shape_infer(const ExperimentalDetectronDetectionOutput* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v6 } // namespace v6
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -5,14 +5,15 @@
#pragma once #pragma once
#include "openvino/op/experimental_detectron_generate_proposals.hpp" #include "openvino/op/experimental_detectron_generate_proposals.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v6 { namespace v6 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op, std::vector<TRShape> shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
const auto& im_info_shape = input_shapes[0]; const auto& im_info_shape = input_shapes[0];
@ -69,18 +70,10 @@ std::vector<TShape> shape_infer(const ExperimentalDetectronGenerateProposalsSing
} }
auto post_nms_count = static_cast<typename TShape::value_type>(op->get_attrs().post_nms_count); auto post_nms_count = static_cast<typename TShape::value_type>(op->get_attrs().post_nms_count);
auto output_shapes = std::vector<TShape>(2, TShape{post_nms_count}); auto output_shapes = std::vector<TRShape>(2, TRShape{post_nms_count});
output_shapes[0].push_back(4); output_shapes[0].push_back(4);
return output_shapes; return output_shapes;
} }
template <class TShape>
void shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v6 } // namespace v6
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -4,14 +4,15 @@
#pragma once #pragma once
#include "openvino/op/experimental_detectron_prior_grid_generator.hpp" #include "openvino/op/experimental_detectron_prior_grid_generator.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v6 { namespace v6 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ExperimentalDetectronPriorGridGenerator* op, std::vector<TRShape> shape_infer(const ExperimentalDetectronPriorGridGenerator* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const auto& priors_shape = input_shapes[0]; const auto& priors_shape = input_shapes[0];
const auto& featmap_shape = input_shapes[1]; const auto& featmap_shape = input_shapes[1];
@ -20,7 +21,7 @@ std::vector<TShape> shape_infer(const ExperimentalDetectronPriorGridGenerator* o
const auto is_flatten = op->get_attrs().flatten; const auto is_flatten = op->get_attrs().flatten;
const size_t output_size = is_flatten ? 2 : 4; const size_t output_size = is_flatten ? 2 : 4;
TShape output_shape; TRShape output_shape;
output_shape.resize(output_size); output_shape.resize(output_size);
output_shape[output_size - 1] = 4; output_shape[output_size - 1] = 4;
@ -70,14 +71,6 @@ std::vector<TShape> shape_infer(const ExperimentalDetectronPriorGridGenerator* o
return {output_shape}; return {output_shape};
} }
template <class TShape>
void shape_infer(const ExperimentalDetectronPriorGridGenerator* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v6 } // namespace v6
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -16,14 +16,14 @@ namespace v6 {
// outputs: // outputs:
// 1. out_shape = [number_of_ROIs, number_of_channels, output_size, output_size] // 1. out_shape = [number_of_ROIs, number_of_channels, output_size, output_size]
// 2. out_rois_shape = [number_of_ROIs, 4] // 2. out_rois_shape = [number_of_ROIs, 4]
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ExperimentalDetectronROIFeatureExtractor* op, std::vector<TRShape> shape_infer(const ExperimentalDetectronROIFeatureExtractor* op,
const std::vector<TShape>& input_shapes) { const std::vector<TShape>& input_shapes) {
using TDim = typename TShape::value_type; using TDim = typename TRShape::value_type;
using namespace ov::util; using namespace ov::util;
NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2);
auto output_shapes = std::vector<TShape>(); auto output_shapes = std::vector<TRShape>();
output_shapes.reserve(2); output_shapes.reserve(2);
const auto& rois_shape = input_shapes[0]; const auto& rois_shape = input_shapes[0];

View File

@ -2,14 +2,15 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <openvino/op/experimental_detectron_topkrois.hpp> #include "openvino/op/experimental_detectron_topkrois.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v6 { namespace v6 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(ExperimentalDetectronTopKROIs* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(ExperimentalDetectronTopKROIs* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& input_rois_shape = input_shapes[0]; const auto& input_rois_shape = input_shapes[0];
@ -45,13 +46,6 @@ std::vector<TShape> shape_infer(ExperimentalDetectronTopKROIs* op, const std::ve
return {{static_cast<typename TShape::value_type>(op->get_max_rois()), 4}}; return {{static_cast<typename TShape::value_type>(op->get_max_rois()), 4}};
} }
template <class TShape>
void shape_infer(ExperimentalDetectronTopKROIs* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v6 } // namespace v6
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -4,6 +4,8 @@
#pragma once #pragma once
#include <openvino/op/extractimagepatches.hpp> #include <openvino/op/extractimagepatches.hpp>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v3 { namespace v3 {
@ -21,44 +23,45 @@ int32_t inline calc_shape_padding(const int32_t input,
return out < 0 ? 0 : out; return out < 0 ? 0 : out;
} }
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const ExtractImagePatches* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const ExtractImagePatches* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; using DimType = typename T::value_type;
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
output_shape.resize(4); output_shape.resize(4);
NODE_VALIDATION_CHECK(op, input_shape.rank().compatible(4), "input tensor must be 4D tensor."); NODE_VALIDATION_CHECK(op, input_shape.rank().compatible(4), "input tensor must be 4D tensor.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
op->m_patch_sizes.size() == 2, op->get_sizes().size() == 2,
"Attribute sizes should be in [size_rows, size_cols] format."); "Attribute sizes should be in [size_rows, size_cols] format.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
op->m_patch_movement_strides.size() == 2, op->get_strides().size() == 2,
"Attribute strides should be in [stride_rows, stride_cols] format."); "Attribute strides should be in [stride_rows, stride_cols] format.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
op->m_patch_movement_strides[0] > 0 && op->m_patch_movement_strides[1] > 0, op->get_strides()[0] > 0 && op->get_strides()[1] > 0,
"Attribute strides should be strictly greater than zeros in values."); "Attribute strides should be strictly greater than zeros in values.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
op->m_patch_selection_rates.size() == 2, op->get_rates().size() == 2,
"Attribute rates should be in [rate_rows, rate_cols] format."); "Attribute rates should be in [rate_rows, rate_cols] format.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
op->m_patch_selection_rates[0] > 0 && op->m_patch_selection_rates[1] > 0, op->get_rates()[0] > 0 && op->get_rates()[1] > 0,
"Attribute rates should be strictly greater than zeros in values."); "Attribute rates should be strictly greater than zeros in values.");
NODE_VALIDATION_CHECK( NODE_VALIDATION_CHECK(op,
op, op->get_auto_pad() == PadType::VALID || op->get_auto_pad() == PadType::SAME_LOWER ||
op->m_padding == PadType::VALID || op->m_padding == PadType::SAME_LOWER || op->m_padding == PadType::SAME_UPPER, op->get_auto_pad() == PadType::SAME_UPPER,
"Attribute padding should be in either valid or same_lower or same_upper."); "Attribute padding should be in either valid or same_lower or same_upper.");
if (input_shape.rank().is_static()) { if (input_shape.rank().is_static()) {
// Determine batch & output_depth // Determine batch & output_depth
output_shape[0] = input_shape[0]; output_shape[0] = input_shape[0];
output_shape[1] = input_shape[1] * op->m_patch_sizes[0] * op->m_patch_sizes[1]; output_shape[1] = input_shape[1] * op->get_sizes()[0] * op->get_sizes()[1];
// Determine spatial shape // Determine spatial shape
if (input_shape[2].is_static() && input_shape[3].is_static()) { if (input_shape[2].is_static() && input_shape[3].is_static()) {
int32_t input_rows = static_cast<int32_t>(input_shape[2].get_length()); int32_t input_rows = static_cast<int32_t>(input_shape[2].get_length());
@ -67,19 +70,19 @@ void shape_infer(const ExtractImagePatches* op, const std::vector<T>& input_shap
int32_t out_cols(0); int32_t out_cols(0);
if (input_rows == 0 || input_cols == 0) { if (input_rows == 0 || input_cols == 0) {
output_shape = input_shape; output_shape = input_shape;
return; return output_shapes;
} }
out_rows = calc_shape_padding(input_rows, out_rows = calc_shape_padding(input_rows,
static_cast<int32_t>(op->m_patch_selection_rates[0]), static_cast<int32_t>(op->get_rates()[0]),
static_cast<int32_t>(op->m_patch_movement_strides[0]), static_cast<int32_t>(op->get_strides()[0]),
static_cast<int32_t>(op->m_patch_sizes[0]), static_cast<int32_t>(op->get_sizes()[0]),
op->m_padding); op->get_auto_pad());
out_cols = calc_shape_padding(input_cols, out_cols = calc_shape_padding(input_cols,
static_cast<int32_t>(op->m_patch_selection_rates[1]), static_cast<int32_t>(op->get_rates()[1]),
static_cast<int32_t>(op->m_patch_movement_strides[1]), static_cast<int32_t>(op->get_strides()[1]),
static_cast<int32_t>(op->m_patch_sizes[1]), static_cast<int32_t>(op->get_sizes()[1]),
op->m_padding); op->get_auto_pad());
auto out_rows_cast = static_cast<typename DimType::value_type>(out_rows); auto out_rows_cast = static_cast<typename DimType::value_type>(out_rows);
auto out_cols_cast = static_cast<typename DimType::value_type>(out_cols); auto out_cols_cast = static_cast<typename DimType::value_type>(out_cols);
@ -88,6 +91,7 @@ void shape_infer(const ExtractImagePatches* op, const std::vector<T>& input_shap
output_shape[3] = out_cols_cast; output_shape[3] = out_cols_cast;
} }
} }
return output_shapes;
} }
} // namespace v3 } // namespace v3
} // namespace op } // namespace op

View File

@ -3,9 +3,9 @@
// //
#pragma once #pragma once
#include <array> #include <array>
#include <openvino/core/validation_util.hpp>
#include <openvino/opsets/opset9.hpp>
#include "openvino/core/validation_util.hpp"
#include "openvino/op/eye.hpp"
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
@ -19,7 +19,8 @@ void check_1D_or_scalar_shape(const ov::op::v9::Eye* op, const T& input_shape, c
NODE_VALIDATION_CHECK(op, num_rows_rank <= 1, name, " value must be a scalar or 1D tensor."); NODE_VALIDATION_CHECK(op, num_rows_rank <= 1, name, " value must be a scalar or 1D tensor.");
if (num_rows_rank == 1) { if (num_rows_rank == 1) {
NODE_VALIDATION_CHECK(op, input_shape.compatible(T{1}), name, " value input should have 1 element."); using TRShape = result_shape_t<T>;
NODE_VALIDATION_CHECK(op, input_shape.compatible(TRShape{1}), name, " value input should have 1 element.");
} }
} }
} }
@ -38,16 +39,16 @@ namespace v9 {
* *
* \param op Pointer to Eye operator. * \param op Pointer to Eye operator.
* \param input_shapes Input shapes of Eye. * \param input_shapes Input shapes of Eye.
* \param constant_data Map of constant data. Default empty. * \param ta Tensor accessor to constant data.
* \return * \return Vector with output shapes.
*/ */
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Eye* op, std::vector<TRShape> shape_infer(const Eye* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
const auto& inputs_count = input_shapes.size(); const auto& inputs_count = input_shapes.size();
NODE_VALIDATION_CHECK(op, (inputs_count == 3 || inputs_count == 4)); NODE_VALIDATION_CHECK(op, (inputs_count == 3 || inputs_count == 4));
TShape output_shape; TRShape output_shape;
for (size_t i = 0; i < 3; ++i) { for (size_t i = 0; i < 3; ++i) {
util::check_1D_or_scalar_shape(op, input_shapes[i], eye::shape_names[i]); util::check_1D_or_scalar_shape(op, input_shapes[i], eye::shape_names[i]);
@ -57,10 +58,11 @@ std::vector<TShape> shape_infer(const Eye* op,
const auto& batch_shape = input_shapes[3]; const auto& batch_shape = input_shapes[3];
NODE_VALIDATION_CHECK(op, batch_shape.rank().compatible(1), eye::shape_names[3], " input must be a 1D tensor."); NODE_VALIDATION_CHECK(op, batch_shape.rank().compatible(1), eye::shape_names[3], " input must be a 1D tensor.");
if (batch_shape.is_static()) { if (batch_shape.is_static()) {
if (get_data_as_shape<TShape>(3, op, output_shape, constant_data)) { if (auto batch_as_shape = get_input_const_data_as_shape<TRShape>(op, 3, ta)) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
static_cast<int64_t>(batch_shape[0].get_length()) == static_cast<int64_t>(batch_shape[0].get_length()) ==
static_cast<int64_t>(output_shape.rank().get_length())); static_cast<int64_t>(batch_as_shape->rank().get_length()));
output_shape = std::move(*batch_as_shape);
} else { } else {
output_shape = PartialShape::dynamic(batch_shape[0].get_length()); output_shape = PartialShape::dynamic(batch_shape[0].get_length());
} }
@ -73,7 +75,7 @@ std::vector<TShape> shape_infer(const Eye* op,
constexpr auto get_non_negatives = ov::util::InTypeRange<TDimValue>(0, std::numeric_limits<TDimValue>::max()); constexpr auto get_non_negatives = ov::util::InTypeRange<TDimValue>(0, std::numeric_limits<TDimValue>::max());
for (size_t i = 0; i < 2; ++i) { for (size_t i = 0; i < 2; ++i) {
if (auto eye_dim = get_input_const_data_as_shape<TShape>(op, i, constant_data, get_non_negatives)) { if (auto eye_dim = get_input_const_data_as_shape<TRShape>(op, i, ta, get_non_negatives)) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
eye_dim->size() == 1, eye_dim->size() == 1,
eye::shape_names[i], eye::shape_names[i],
@ -87,14 +89,6 @@ std::vector<TShape> shape_infer(const Eye* op,
return {output_shape}; return {output_shape};
} }
template <class TShape>
void shape_infer(const Eye* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = shape_infer(op, input_shapes, constant_data);
}
} // namespace v9 } // namespace v9
} // namespace op } // namespace op
} // namespace ov } // namespace ov
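A sketch of the migrated call, assuming a constructed v9::Eye node eye_op; the old constant_data map is replaced by a tensor accessor built over a container of constant tensors:

int64_t rows = 3, cols = 4, diag = 0;
const std::unordered_map<size_t, ov::Tensor> const_data{{0, ov::Tensor(ov::element::i64, ov::Shape{}, &rows)},
                                                        {1, ov::Tensor(ov::element::i64, ov::Shape{}, &cols)},
                                                        {2, ov::Tensor(ov::element::i64, ov::Shape{}, &diag)}};
const std::vector<ov::PartialShape> input_shapes{{}, {}, {}};  // three scalar inputs
const auto out = ov::op::v9::shape_infer(eye_op.get(), input_shapes, ov::make_tensor_accessor(const_data));
// out[0] == ov::PartialShape{3, 4}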

View File

@ -7,22 +7,25 @@
#include "utils.hpp" #include "utils.hpp"
template <class T> namespace ov {
void shape_infer(const ov::op::v0::FakeQuantize* op, namespace op {
const std::vector<T>& input_shapes, namespace v0 {
std::vector<T>& output_shapes) { template <class T, class TRShape = result_shape_t<T>>
NODE_VALIDATION_CHECK(op, input_shapes.size() == 5 && output_shapes.size() == 1); std::vector<TRShape> shape_infer(const FakeQuantize* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 5);
T data_pshape = input_shapes[0]; TRShape data_pshape = input_shapes[0];
ov::op::AutoBroadcastSpec auto_broadcast = op->get_auto_broadcast(); ov::op::AutoBroadcastSpec auto_broadcast = op->get_auto_broadcast();
for (size_t i = 1; i <= 4; ++i) { for (size_t i = 1; i <= 4; ++i) {
if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NONE) { if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NONE) {
NODE_VALIDATION_CHECK(op, T::merge_into(data_pshape, input_shapes[i]), "Argument shapes are inconsistent."); NODE_VALIDATION_CHECK(op,
TRShape::merge_into(data_pshape, input_shapes[i]),
"Argument shapes are inconsistent.");
} else if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NUMPY || } else if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NUMPY ||
auto_broadcast.m_type == ov::op::AutoBroadcastType::PDPD) { auto_broadcast.m_type == ov::op::AutoBroadcastType::PDPD) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
T::broadcast_merge_into(data_pshape, input_shapes[i], auto_broadcast), TRShape::broadcast_merge_into(data_pshape, input_shapes[i], auto_broadcast),
"Argument shapes are inconsistent."); "Argument shapes are inconsistent.");
} else { } else {
NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification");
@ -35,5 +38,8 @@ void shape_infer(const ov::op::v0::FakeQuantize* op,
// input[1].shape = [1, 3, 4, 5] // input[1].shape = [1, 3, 4, 5]
// This controversial behavior is kept here due to backward-compatibility and the fact that // This controversial behavior is kept here due to backward-compatibility and the fact that
// frameworks do not allow such behavior either -- so the chance to have such FQ configuration is minimal // frameworks do not allow such behavior either -- so the chance to have such FQ configuration is minimal
first_input_passthrough_infer(op, input_shapes, output_shapes); return {data_pshape};
} }
} // namespace v0
} // namespace op
} // namespace ov
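An illustration of the behavior the comment above describes; a sketch assuming shape inference runs on construction: the range inputs are broadcast-merged against the data shape, so the output here keeps the data shape.

const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{64, 3, 4, 5});
ov::OutputVector ranges;
for (int i = 0; i < 4; ++i)
    ranges.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 4, 5}));
const auto fq = std::make_shared<ov::op::v0::FakeQuantize>(data, ranges[0], ranges[1], ranges[2], ranges[3], 256);
// fq->get_output_partial_shape(0) == ov::PartialShape{64, 3, 4, 5}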

View File

@ -9,20 +9,19 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace util { template <class T, class TRShape = result_shape_t<T>>
template <class T> std::vector<TRShape> shape_infer(const util::FFTBase* op,
void shape_infer(const ov::op::util::FFTBase* op, const std::vector<T>& input_shapes,
const std::vector<T>& input_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
std::vector<T>& output_shapes, using DimType = typename T::value_type;
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3));
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1);
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
const auto& axes_shape = input_shapes[1]; const auto& axes_shape = input_shapes[1];
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
std::vector<int64_t> axes; auto axes = get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
bool axes_are_known = get_data_as_int64<T>(1, op, axes, constant_data);
if (input_shape.rank().is_static()) { if (input_shape.rank().is_static()) {
const auto input_rank = input_shape.size(); const auto input_rank = input_shape.size();
@ -52,11 +51,11 @@ void shape_infer(const ov::op::util::FFTBase* op,
// 'r - 1 + a'. The reason is the following: real input tensor of the shape // 'r - 1 + a'. The reason is the following: real input tensor of the shape
// [n_0, ..., n_{r - 1}, 2] is interpreted as a complex tensor with the shape // [n_0, ..., n_{r - 1}, 2] is interpreted as a complex tensor with the shape
// [n_0, ..., n_{r - 1}]. // [n_0, ..., n_{r - 1}].
if (axes_shape.rank().is_static() && axes_are_known) { if (axes_shape.rank().is_static() && axes) {
const auto axis_min_value = -static_cast<int64_t>(input_rank); const auto axis_min_value = -static_cast<int64_t>(input_rank);
const auto axis_max_value = static_cast<int64_t>(input_rank) - 1; const auto axis_max_value = static_cast<int64_t>(input_rank) - 1;
ov::AxisSet axes_set; ov::AxisSet axes_set;
for (int64_t& axis : axes) { for (int64_t& axis : *axes) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
axis_min_value < axis && axis < axis_max_value, axis_min_value < axis && axis < axis_max_value,
"FFT op axis ", "FFT op axis ",
@ -72,7 +71,7 @@ void shape_infer(const ov::op::util::FFTBase* op,
axes_set.insert(static_cast<size_t>(axis)); axes_set.insert(static_cast<size_t>(axis));
} }
NODE_VALIDATION_CHECK(op, axes.size() == axes_set.size(), "FFT op axes must be unique."); NODE_VALIDATION_CHECK(op, axes->size() == axes_set.size(), "FFT op axes must be unique.");
} }
} }
@ -97,31 +96,30 @@ void shape_infer(const ov::op::util::FFTBase* op,
} }
output_shape = input_shape; output_shape = input_shape;
if (input_shape.rank().is_static() && axes_shape.rank().is_static() && input_shapes.size() == 3 && axes_are_known) { if (input_shape.rank().is_static() && axes_shape.rank().is_static() && input_shapes.size() == 3 && axes) {
const auto& signal_size_shape = input_shapes[2]; const auto& signal_size_shape = input_shapes[2];
std::vector<int64_t> signal_size; auto signal_size = get_input_const_data_as<TRShape, int64_t>(op, 2, ta);
bool status_signal_size = get_data_as_int64<T>(2, op, signal_size, constant_data);
if (signal_size_shape.rank().is_static() && status_signal_size) { if (signal_size_shape.rank().is_static() && signal_size) {
size_t num_of_axes = axes.size(); size_t num_of_axes = axes->size();
for (size_t i = 0; i < num_of_axes; ++i) { for (size_t i = 0; i < num_of_axes; ++i) {
if (signal_size[i] == -1) { if ((*signal_size)[i] == -1) {
continue; continue;
} }
output_shape[axes[i]] = DimType(signal_size[i]); output_shape[(*axes)[i]] = DimType((*signal_size)[i]);
} }
} else if (signal_size_shape.rank().is_static()) { } else if (signal_size_shape.rank().is_static()) {
for (int64_t& axis : axes) { for (int64_t& axis : *axes) {
output_shape[axis] = ov::Dimension::dynamic(); output_shape[axis] = ov::Dimension::dynamic();
} }
} }
} else if (input_shape.rank().is_static() && (axes_shape.rank().is_dynamic() || !axes_are_known)) { } else if (input_shape.rank().is_static() && (axes_shape.rank().is_dynamic() || !axes)) {
const auto input_rank = input_shape.size(); const auto input_rank = input_shape.size();
for (size_t i = 0; i < input_rank - 1; ++i) { for (size_t i = 0; i < input_rank - 1; ++i) {
output_shape[i] = ov::Dimension::dynamic(); output_shape[i] = ov::Dimension::dynamic();
} }
} }
return output_shapes;
} }
} // namespace util
} // namespace op } // namespace op
} // namespace ov } // namespace ov
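A worked example of the axes/signal_size handling, as a sketch with v7::DFT (one of the FFTBase ops): signal_size -1 keeps the dimension on axis 1, while axis 2 is overwritten with 5; the trailing dimension of size 2 holds the complex pair.

const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 10, 10, 2});
const auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2});
const auto sizes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {-1, 5});
const auto dft = std::make_shared<ov::op::v7::DFT>(data, axes, sizes);
// dft->get_output_partial_shape(0) == ov::PartialShape{2, 10, 5, 2}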

View File

@ -9,15 +9,16 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v6 { namespace v6 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const GatherElements* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const GatherElements* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; using DimType = typename T::value_type;
const auto& data_pshape = input_shapes[0]; const auto& data_pshape = input_shapes[0];
const auto& indices_pshape = input_shapes[1]; const auto& indices_pshape = input_shapes[1];
auto data_rank = data_pshape.rank(); auto data_rank = data_pshape.rank();
auto indices_rank = indices_pshape.rank(); auto indices_rank = indices_pshape.rank();
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
int64_t axis = op->get_axis(); int64_t axis = op->get_axis();
@ -36,16 +37,16 @@ void shape_infer(const GatherElements* op, const std::vector<T>& input_shapes, s
// output has the same rank of data // output has the same rank of data
output_shape = data_pshape; output_shape = data_pshape;
output_shape[axis] = DimType(); output_shape[axis] = DimType();
return; return output_shapes;
} }
if (data_rank.is_dynamic()) { if (data_rank.is_dynamic()) {
if (indices_rank.is_dynamic()) { if (indices_rank.is_dynamic()) {
output_shape = PartialShape::dynamic(); output_shape = PartialShape::dynamic();
return; return output_shapes;
} }
output_shape = indices_pshape; output_shape = indices_pshape;
return; return output_shapes;
} }
// the only case left is when data_rank.is_static() && indices_rank.is_static() // the only case left is when data_rank.is_static() && indices_rank.is_static()
@ -70,6 +71,7 @@ void shape_infer(const GatherElements* op, const std::vector<T>& input_shapes, s
" are not consistent, `data` and `indices` must have equal or " " are not consistent, `data` and `indices` must have equal or "
"intersecting dimensions, except for the dimension at axis index.", "intersecting dimensions, except for the dimension at axis index.",
axis); axis);
return output_shapes;
} }
} // namespace v6 } // namespace v6
} // namespace op } // namespace op

View File

@ -9,8 +9,8 @@ namespace ov {
namespace op { namespace op {
namespace gather_nd { namespace gather_nd {
template <class TShape, class TOp> template <class TOp, class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> gather_nd_base_shape_infer(const TOp* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> gather_nd_base_shape_infer(const TOp* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_pshape = input_shapes[0]; const auto& data_pshape = input_shapes[0];
@ -59,17 +59,17 @@ std::vector<TShape> gather_nd_base_shape_infer(const TOp* op, const std::vector<
for (auto dim_idx = batch_dims + indices_tuple_length; dim_idx < data_pshape.size(); ++dim_idx) { for (auto dim_idx = batch_dims + indices_tuple_length; dim_idx < data_pshape.size(); ++dim_idx) {
output_dims.emplace_back(data_pshape[dim_idx]); output_dims.emplace_back(data_pshape[dim_idx]);
} }
return {TShape(std::move(output_dims))}; return {TRShape(std::move(output_dims))};
} else { } else {
return {ov::PartialShape::dynamic()}; return {ov::PartialShape::dynamic()};
} }
} }
} // namespace gather_nd } // namespace gather_nd
namespace v5 { namespace v5 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const GatherND* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) { std::vector<TRShape> shape_infer(const GatherND* op, const std::vector<TShape>& input_shapes) {
using DimType = typename TShape::value_type; using DimType = typename TShape::value_type;
output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes); auto output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes);
// If batch_dims > 1, batch dimensions need to be fused // If batch_dims > 1, batch dimensions need to be fused
auto batch_dims = op->get_batch_dims(); auto batch_dims = op->get_batch_dims();
@ -82,15 +82,16 @@ void shape_infer(const GatherND* op, const std::vector<TShape>& input_shapes, st
output_dims[0] *= dim; output_dims[0] *= dim;
}); });
output_dims.insert(output_dims.begin() + 1, output_base_shape.begin() + batch_dims, output_base_shape.end()); output_dims.insert(output_dims.begin() + 1, output_base_shape.begin() + batch_dims, output_base_shape.end());
output_shapes[0] = TShape(std::move(output_dims)); output_shapes[0] = TRShape(std::move(output_dims));
} }
return output_shapes;
} }
} // namespace v5 } // namespace v5
namespace v8 { namespace v8 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const GatherND* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) { std::vector<TRShape> shape_infer(const GatherND* op, const std::vector<TShape>& input_shapes) {
output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes); return gather_nd::gather_nd_base_shape_infer(op, input_shapes);
} }
} // namespace v8 } // namespace v8
} // namespace op } // namespace op
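An illustration of the v5 batch-dimension fusion above; a sketch assuming shape inference runs on construction: with batch_dims = 2 the two leading batch dimensions are multiplied into a single output dimension.

const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3, 4, 5});
const auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::PartialShape{2, 3, 3, 1});
const auto gather_nd = std::make_shared<ov::op::v5::GatherND>(data, indices, 2);
// base shape [2, 3, 3, 5]; batch dims fused: gather_nd->get_output_partial_shape(0) == [6, 3, 5]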

View File

@ -8,19 +8,18 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace util { template <class TShape, class TRShape = result_shape_t<TShape>>
template <class T> std::vector<TRShape> shape_infer(const util::GatherBase* op,
void shape_infer(const GatherBase* op, const std::vector<TShape>& input_shapes,
const std::vector<T>& input_shapes, const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
std::vector<T>& output_shapes, NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 && output_shapes.size() == 1);
const auto& data_pshape = input_shapes[0]; const auto& data_pshape = input_shapes[0];
const auto& indices_pshape = input_shapes[1]; const auto& indices_pshape = input_shapes[1];
const auto& axis_pshape = input_shapes[2]; const auto& axis_pshape = input_shapes[2];
auto data_rank = data_pshape.rank(); auto data_rank = data_pshape.rank();
auto indices_rank = indices_pshape.rank(); auto indices_rank = indices_pshape.rank();
auto axis_rank = axis_pshape.rank(); auto axis_rank = axis_pshape.rank();
auto output_shapes = std::vector<TRShape>(1);
auto& output_pshape = output_shapes[0]; auto& output_pshape = output_shapes[0];
if (axis_rank.is_static() && axis_pshape.is_static()) { if (axis_rank.is_static() && axis_pshape.is_static()) {
@ -37,12 +36,11 @@ void shape_infer(const GatherBase* op,
batch_dims += indices_rank.get_length(); batch_dims += indices_rank.get_length();
} }
std::vector<int64_t> axes_val; bool axis_is_set;
bool axis_is_set = get_data_as_int64<T>(2, op, axes_val, constant_data); int64_t axis;
int64_t axis = 0; if (const auto axes_val = get_input_const_data_as<TRShape, int64_t>(op, 2, tensor_accessor)) {
axis = (*axes_val)[0];
if (axis_is_set) { axis_is_set = true;
axis = axes_val[0];
if (data_rank.is_static()) { if (data_rank.is_static()) {
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
@ -58,6 +56,9 @@ void shape_infer(const GatherBase* op,
batch_dims, batch_dims,
", axis = ", ", axis = ",
axis); axis);
} else {
axis_is_set = false;
axis = 0;
} }
if (indices_rank.is_static() && batch_dims >= 0) { if (indices_rank.is_static() && batch_dims >= 0) {
@ -108,7 +109,7 @@ void shape_infer(const GatherBase* op,
out_rank = out_rank - indices_rank.get_max_length(); out_rank = out_rank - indices_rank.get_max_length();
output_pshape = PartialShape::dynamic(out_rank); output_pshape = PartialShape::dynamic(out_rank);
} }
return output_shapes;
} }
} // namespace util
} // namespace op } // namespace op
} // namespace ov } // namespace ov
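A usage sketch for the common path above, where the axis comes from a constant; the axis dimension of data is replaced by the indices shape.

const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3, 4});
const auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::PartialShape{5});
const auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
const auto gather = std::make_shared<ov::op::v8::Gather>(data, indices, axis);
// gather->get_output_partial_shape(0) == ov::PartialShape{2, 5, 4}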

View File

@ -2,23 +2,26 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#pragma once #pragma once
#include <openvino/op/gather_tree.hpp>
#include "openvino/op/gather_tree.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v1 { namespace v1 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const GatherTree* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const GatherTree* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
using DimType = typename std::iterator_traits<typename TShape::iterator>::value_type; using DimType = typename TShape::value_type;
const auto& step_ids_shape = input_shapes[0]; const auto& step_ids_shape = input_shapes[0];
const auto& parent_idx_shape = input_shapes[1]; const auto& parent_idx_shape = input_shapes[1];
const auto& max_seq_len_pshape = input_shapes[2]; const auto& max_seq_len_pshape = input_shapes[2];
const auto& end_token_pshape = input_shapes[3]; const auto& end_token_pshape = input_shapes[3];
auto result_shape = step_ids_shape; TRShape result_shape = step_ids_shape;
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
TShape::merge_into(result_shape, parent_idx_shape) && result_shape.rank().compatible(3), TRShape::merge_into(result_shape, parent_idx_shape) && result_shape.rank().compatible(3),
"step_ids and parent_idx inputs must have the same shape with rank 3. Got: ", "step_ids and parent_idx inputs must have the same shape with rank 3. Got: ",
step_ids_shape, step_ids_shape,
" and ", " and ",
@ -47,11 +50,6 @@ std::vector<TShape> shape_infer(const GatherTree* op, const std::vector<TShape>&
end_token_pshape); end_token_pshape);
return {result_shape}; return {result_shape};
} }
template <class TShape>
void shape_infer(const GatherTree* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v1 } // namespace v1
} // namespace op } // namespace op
} // namespace ov } // namespace ov
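
The merge of step_ids and parent_idx above relies on PartialShape::merge_into, which unifies compatible dimensions in place; a small standalone illustration of that helper (not from this patch):

#include "openvino/core/partial_shape.hpp"

void merge_demo() {
    ov::PartialShape result{ov::Dimension::dynamic(), 4, ov::Dimension::dynamic()};
    const bool compatible = ov::PartialShape::merge_into(result, ov::PartialShape{3, 4, 5});
    // compatible == true and result == {3, 4, 5}; a mismatch such as 4 vs 7 would return false
    (void)compatible;
}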

View File

@ -6,13 +6,15 @@
#include <openvino/op/generate_proposals.hpp> #include <openvino/op/generate_proposals.hpp>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v9 { namespace v9 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
const auto& im_info_shape = input_shapes[0]; const auto& im_info_shape = input_shapes[0];
const auto& anchors_shape = input_shapes[1]; const auto& anchors_shape = input_shapes[1];
@ -123,9 +125,7 @@ void shape_infer(const GenerateProposals* op, const std::vector<T>& input_shapes
} }
auto num_rois = Dimension(0, (num_batches * op->get_attrs().post_nms_count).get_max_length()); auto num_rois = Dimension(0, (num_batches * op->get_attrs().post_nms_count).get_max_length());
output_shapes[0] = ov::PartialShape({num_rois, 4}); return {TRShape{num_rois, 4}, TRShape{num_rois}, TRShape{num_batches}};
output_shapes[1] = ov::PartialShape({num_rois});
output_shapes[2] = ov::PartialShape({num_batches});
} }
} // namespace v9 } // namespace v9

View File

@ -8,21 +8,24 @@
#include <openvino/op/grid_sample.hpp> #include <openvino/op/grid_sample.hpp>
#include <vector> #include <vector>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v9 { namespace v9 {
template <class shape_t> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const GridSample* op, const std::vector<shape_t>& input_shapes, std::vector<shape_t>& output_shapes) { std::vector<TRShape> shape_infer(const GridSample* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
input_shapes.size() == 2 && output_shapes.size() == 1, input_shapes.size() == 2,
"Incorrect number of input/output shapes in GridSample's shape inference function"); "Incorrect number of input shapes in GridSample's shape inference function");
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
NODE_VALIDATION_CHECK(op, data_shape.rank().compatible(4), "The supported shape of the input data tensor is 4D."); NODE_VALIDATION_CHECK(op, data_shape.rank().compatible(4), "The supported shape of the input data tensor is 4D.");
const auto& grid_shape = input_shapes[1]; const auto& grid_shape = input_shapes[1];
NODE_VALIDATION_CHECK(op, grid_shape.rank().compatible(4), "The supported shape of the grid tensor is 4D."); NODE_VALIDATION_CHECK(op, grid_shape.rank().compatible(4), "The supported shape of the grid tensor is 4D.");
shape_t output_shape; auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes.front();
output_shape.resize(4); output_shape.resize(4);
auto& batch_dim = output_shape[0]; auto& batch_dim = output_shape[0];
@ -41,7 +44,7 @@ void shape_infer(const GridSample* op, const std::vector<shape_t>& input_shapes,
if (data_shape.rank().is_static()) { if (data_shape.rank().is_static()) {
NODE_VALIDATION_CHECK( NODE_VALIDATION_CHECK(
op, op,
shape_t::value_type::merge(batch_dim, grid_shape[0], data_shape[0]), TShape::value_type::merge(batch_dim, grid_shape[0], data_shape[0]),
"The batch dimension in the input data tensor's shape doesn't match the batch dimension in " "The batch dimension in the input data tensor's shape doesn't match the batch dimension in "
"the grid tensor's shape."); "the grid tensor's shape.");
channel_dim = data_shape[1]; channel_dim = data_shape[1];
@ -50,8 +53,7 @@ void shape_infer(const GridSample* op, const std::vector<shape_t>& input_shapes,
batch_dim = data_shape[0]; batch_dim = data_shape[0];
channel_dim = data_shape[1]; channel_dim = data_shape[1];
} }
return output_shapes;
output_shapes[0] = std::move(output_shape);
} }
} // namespace v9 } // namespace v9
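
A caller-side sketch of the migrated GridSample inference (the default-constructed op and header path are assumptions, not part of this patch):

#include <memory>
#include <vector>

#include "openvino/op/grid_sample.hpp"
// plus the internal grid_sample_shape_inference.hpp header (exact path assumed)

void grid_sample_demo() {
    const auto op = std::make_shared<ov::op::v9::GridSample>();
    const std::vector<ov::PartialShape> input_shapes{{2, 3, 32, 32},   // data, NCHW
                                                     {2, 10, 10, 2}};  // grid, NHW2
    const auto output_shapes = ov::op::v9::shape_infer(op.get(), input_shapes);
    // batch merged from both inputs, channels from data, spatial dims from the grid:
    // output_shapes[0] == {2, 3, 10, 10}
}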

View File

@ -22,36 +22,38 @@ constexpr size_t filter_non_spatial_dims_count<v1::GroupConvolutionBackpropData>
} // namespace convolution } // namespace convolution
namespace v1 { namespace v1 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const GroupConvolutionBackpropData* op, std::vector<TRShape> shape_infer(const GroupConvolutionBackpropData* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
CoordinateDiff& pads_begin, CoordinateDiff& pads_begin,
CoordinateDiff& pads_end, CoordinateDiff& pads_end,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
const auto inputs_count = input_shapes.size(); const auto inputs_count = input_shapes.size();
const auto has_spatial_shape = inputs_count >= 3; const auto has_spatial_shape = inputs_count >= 3;
NODE_VALIDATION_CHECK(op, inputs_count >= 2); NODE_VALIDATION_CHECK(op, inputs_count >= 2);
using namespace ov::util; using namespace ov::util;
TShape out_spatial_shape; ov::optional<TRShape> out_spatial_shape;
if (has_spatial_shape) { if (has_spatial_shape) {
const auto& spatial_shape = input_shapes[2]; const auto& spatial_shape = input_shapes[2];
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
spatial_shape.rank().compatible(1), spatial_shape.rank().compatible(1),
"Input delivering output shape must have rank 1."); "Input delivering output shape must have rank 1.");
out_spatial_shape = get_input_const_data_as_shape<TRShape>(op, 2, ta);
if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) { if (!out_spatial_shape) {
if (spatial_shape.is_static()) { if (spatial_shape.is_static()) {
out_spatial_shape.resize(spatial_shape[0].get_length()); out_spatial_shape.emplace();
out_spatial_shape->resize(spatial_shape[0].get_length());
} else { } else {
out_spatial_shape = PartialShape::dynamic(); out_spatial_shape = PartialShape::dynamic();
} }
} }
} else {
out_spatial_shape.emplace();
} }
const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, *out_spatial_shape);
const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape); TRShape output_shape;
TShape output_shape;
if (num_spatial != util::num_spatial_undefined) { if (num_spatial != util::num_spatial_undefined) {
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
const auto& filters_shape = input_shapes[1]; const auto& filters_shape = input_shapes[1];
@ -60,9 +62,8 @@ std::vector<TShape> shape_infer(const GroupConvolutionBackpropData* op,
NODE_VALIDATION_CHECK( NODE_VALIDATION_CHECK(
op, op,
!has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial, !has_spatial_shape || out_spatial_shape->rank().is_dynamic() || out_spatial_shape->size() == num_spatial,
"Output shape should be defined for all and only spatial dimensions."); "Output shape should be defined for all and only spatial dimensions.");
convolution::resize_empty_padding(num_spatial, pads_begin, pads_end); convolution::resize_empty_padding(num_spatial, pads_begin, pads_end);
if (is_attr_validation_required(op)) { if (is_attr_validation_required(op)) {
convolution::validate::data_shape(op, data_shape); convolution::validate::data_shape(op, data_shape);
@ -77,8 +78,7 @@ std::vector<TShape> shape_infer(const GroupConvolutionBackpropData* op,
convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end); convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end);
} }
convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end); convolution::apply_padding(op, input_shapes, *out_spatial_shape, pads_begin, pads_end);
output_shape.reserve(util::spatial_dim_offset + num_spatial); output_shape.reserve(util::spatial_dim_offset + num_spatial);
output_shape.emplace_back(data_rank.is_static() ? data_shape[0] : dim::inf_bound); output_shape.emplace_back(data_rank.is_static() ? data_shape[0] : dim::inf_bound);
@ -102,8 +102,8 @@ std::vector<TShape> shape_infer(const GroupConvolutionBackpropData* op,
// add spatial dimensions // add spatial dimensions
if (has_spatial_shape) { if (has_spatial_shape) {
output_shape.insert(output_shape.end(), output_shape.insert(output_shape.end(),
std::make_move_iterator(out_spatial_shape.begin()), std::make_move_iterator(out_spatial_shape->begin()),
std::make_move_iterator(out_spatial_shape.end())); std::make_move_iterator(out_spatial_shape->end()));
} else { } else {
convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape); convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape);
} }
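
The ov::optional wrapper lets this code distinguish "no spatial-shape input" from "input present but not constant", and downstream code dereferences unconditionally. A distilled sketch of that control flow (the reader function is a hypothetical stand-in for get_input_const_data_as_shape):

#include "openvino/core/partial_shape.hpp"
// ov::optional comes from the shape-inference utilities (ov_optional.hpp; exact path assumed)

ov::optional<ov::PartialShape> read_const_shape() {
    return {};  // stand-in: pretend input 2 is not a constant
}

void out_spatial_demo(bool has_spatial_shape) {
    ov::optional<ov::PartialShape> out_spatial_shape;
    if (has_spatial_shape) {
        out_spatial_shape = read_const_shape();  // engaged only for a constant input
        if (!out_spatial_shape) {
            out_spatial_shape.emplace();         // engaged, then sized from the 1-D input
            out_spatial_shape->resize(2);
        }
    } else {
        out_spatial_shape.emplace();             // no spatial input: engaged, empty shape
    }
    const auto spatial_rank = out_spatial_shape->size();  // safe in every branch
    (void)spatial_rank;
}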

View File

@ -22,18 +22,17 @@ constexpr size_t filter_non_spatial_dims_count<v1::GroupConvolution>() {
} // namespace convolution } // namespace convolution
namespace v1 { namespace v1 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const GroupConvolution* op, std::vector<TRShape> shape_infer(const GroupConvolution* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
CoordinateDiff& pads_begin, CoordinateDiff& pads_begin,
CoordinateDiff& pads_end, CoordinateDiff& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2);
using namespace ov::util; using namespace ov::util;
const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes); const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes);
TShape output_shape; TRShape output_shape;
if (num_spatial != convolution::num_spatial_undefined) { if (num_spatial != convolution::num_spatial_undefined) {
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
const auto& filters_shape = input_shapes[1]; const auto& filters_shape = input_shapes[1];

View File

@ -50,13 +50,6 @@ std::vector<TShape> shape_infer(const GroupNormalization* op, const std::vector<
return {input_shapes[0]}; return {input_shapes[0]};
} }
template <class TShape>
void shape_infer(const GroupNormalization* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v12 } // namespace v12
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -12,17 +12,12 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v3 { namespace v3 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const GRUCell* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const GRUCell* op, const std::vector<TShape>& input_shapes) {
constexpr auto num_gates = 3; constexpr auto num_gates = 3;
constexpr auto num_state_nodes = 1; constexpr auto num_state_nodes = 1;
return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_linear_before_reset()); return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_linear_before_reset());
} }
template <class TShape>
void shape_infer(const GRUCell* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v3 } // namespace v3
} // namespace op } // namespace op
} // namespace ov } // namespace ov
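
A caller-side sketch for the v3 GRUCell path (default-constructed op with default attributes and the header path are assumptions); with 3 gates, W, R and B carry a 3 * hidden_size gate dimension:

#include <memory>
#include <vector>

#include "openvino/op/gru_cell.hpp"
// plus the internal gru_cell_shape_inference.hpp header (exact path assumed)

void gru_cell_demo() {
    const auto op = std::make_shared<ov::op::v3::GRUCell>();  // linear_before_reset assumed false
    const std::vector<ov::PartialShape> input_shapes{{2, 4},   // X: batch x input_size
                                                     {2, 5},   // H_t: batch x hidden_size
                                                     {15, 4},  // W: 3 * hidden_size x input_size
                                                     {15, 5},  // R: 3 * hidden_size x hidden_size
                                                     {15}};    // B: 3 * hidden_size
    const auto output_shapes = ov::op::v3::shape_infer(op.get(), input_shapes);
    // single state output: output_shapes[0] == {2, 5}
}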

View File

@ -11,10 +11,8 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v5 { namespace v5 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const ov::op::v5::GRUSequence* op, std::vector<TRShape> shape_infer(const ov::op::v5::GRUSequence* op, const std::vector<TShape>& input_shapes) {
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
constexpr size_t expected_in_shapes_count = 6; constexpr size_t expected_in_shapes_count = 6;
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
input_shapes.size() == expected_in_shapes_count, input_shapes.size() == expected_in_shapes_count,
@ -26,12 +24,12 @@ void shape_infer(const ov::op::v5::GRUSequence* op,
constexpr auto num_gates = 3; constexpr auto num_gates = 3;
constexpr auto num_state_nodes = 1; constexpr auto num_state_nodes = 1;
output_shapes = rnn::seq_base_shape_infer(op, return rnn::seq_base_shape_infer(op,
input_shapes, input_shapes,
num_gates, num_gates,
num_state_nodes, num_state_nodes,
op->get_direction(), op->get_direction(),
op->get_linear_before_reset()); op->get_linear_before_reset());
} }
} // namespace v5 } // namespace v5
} // namespace op } // namespace op

View File

@ -130,10 +130,10 @@ void resize_padding(const ov::op::util::InterpolateBase* op,
* @param pads_end Dimensions end padding values. * @param pads_end Dimensions end padding values.
* @return TShape Shape with dimensions of input plus paddings. * @return TShape Shape with dimensions of input plus paddings.
*/ */
template <class TShape, class TInputIter> template <class TShape, class TInputIter, class TRShape = result_shape_t<TShape>>
TShape make_padded_shape(const TShape& input, TInputIter pads_begin, TInputIter pads_end) { TRShape make_padded_shape(const TShape& input, TInputIter pads_begin, TInputIter pads_end) {
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
TShape out; TRShape out;
out.reserve(input.size()); out.reserve(input.size());
std::transform(input.cbegin(), input.cend(), std::back_inserter(out), [&pads_begin, &pads_end](const TDim& d) { std::transform(input.cbegin(), input.cend(), std::back_inserter(out), [&pads_begin, &pads_end](const TDim& d) {
return ov::util::dim::padded(d, (*pads_begin++ + *pads_end++)); return ov::util::dim::padded(d, (*pads_begin++ + *pads_end++));
@ -240,14 +240,14 @@ void update_dims_with_scales_on_axes(TShape& out_shape,
} // namespace interpolate } // namespace interpolate
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Interpolate* op, std::vector<TRShape> shape_infer(const Interpolate* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const ITensorAccessor& tensor_accessor) { const ITensorAccessor& tensor_accessor) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& img_shape = input_shapes[0]; const auto& img_shape = input_shapes[0];
auto output_shapes = std::vector<TShape>(1, img_shape); auto output_shapes = std::vector<TRShape>(1, img_shape);
auto& out_shape = output_shapes.front(); auto& out_shape = output_shapes.front();
if (img_shape.rank().is_static()) { if (img_shape.rank().is_static()) {
@ -256,7 +256,7 @@ std::vector<TShape> shape_infer(const Interpolate* op,
interpolate::validate::axes_values(op, axes, img_rank); interpolate::validate::axes_values(op, axes, img_rank);
if (const auto target_spatial_shape = get_input_const_data_as_shape<TShape>(op, 1, tensor_accessor)) { if (const auto target_spatial_shape = get_input_const_data_as_shape<TRShape>(op, 1, tensor_accessor)) {
auto target_spatial_shape_iter = target_spatial_shape->begin(); auto target_spatial_shape_iter = target_spatial_shape->begin();
for (const auto axis : axes) { for (const auto axis : axes) {
out_shape[axis] = *target_spatial_shape_iter++; out_shape[axis] = *target_spatial_shape_iter++;
@ -271,12 +271,12 @@ std::vector<TShape> shape_infer(const Interpolate* op,
} // namespace v0 } // namespace v0
namespace v4 { namespace v4 {
template <class TShape, class TContainer> template <class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Interpolate* op, std::vector<TRShape> shape_infer(const Interpolate* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
TContainer& pads_begin, TContainer& pads_begin,
TContainer& pads_end, TContainer& pads_end,
const ITensorAccessor& tensor_accessor) { const ITensorAccessor& tensor_accessor) {
const auto has_axes_input = (input_shapes.size() == 4); const auto has_axes_input = (input_shapes.size() == 4);
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || has_axes_input)); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || has_axes_input));
@ -289,13 +289,13 @@ std::vector<TShape> shape_infer(const Interpolate* op,
} }
const auto& img_shape = input_shapes[0]; const auto& img_shape = input_shapes[0];
auto output_shapes = std::vector<TShape>(); auto output_shapes = std::vector<TRShape>();
if (img_shape.rank().is_static()) { if (img_shape.rank().is_static()) {
const auto img_rank = img_shape.size(); const auto img_rank = img_shape.size();
interpolate::resize_padding(op, img_rank, pads_begin, pads_end); interpolate::resize_padding(op, img_rank, pads_begin, pads_end);
const auto axes = interpolate::get_axes<TShape>(op, 3, has_axes_input, img_rank, tensor_accessor); const auto axes = interpolate::get_axes<TRShape>(op, 3, has_axes_input, img_rank, tensor_accessor);
if (axes) { if (axes) {
output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin())); output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin()));
@ -315,18 +315,18 @@ std::vector<TShape> shape_infer(const Interpolate* op,
} // namespace v4 } // namespace v4
namespace v11 { namespace v11 {
template <class TShape, class TContainer> template <class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Interpolate* op, std::vector<TRShape> shape_infer(const Interpolate* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
TContainer& pads_begin, TContainer& pads_begin,
TContainer& pads_end, TContainer& pads_end,
const ITensorAccessor& tensor_accessor) { const ITensorAccessor& tensor_accessor) {
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3));
interpolate::validate::are_inputs_except_first_1d(op, input_shapes); interpolate::validate::are_inputs_except_first_1d(op, input_shapes);
const auto& img_shape = input_shapes[0]; const auto& img_shape = input_shapes[0];
auto output_shapes = std::vector<TShape>(); auto output_shapes = std::vector<TRShape>();
if (img_shape.rank().is_static()) { if (img_shape.rank().is_static()) {
const auto img_rank = img_shape.size(); const auto img_rank = img_shape.size();
@ -334,7 +334,7 @@ std::vector<TShape> shape_infer(const Interpolate* op,
interpolate::resize_padding(op, img_rank, pads_begin, pads_end); interpolate::resize_padding(op, img_rank, pads_begin, pads_end);
const auto axes = interpolate::get_axes<TShape>(op, 2, has_axes_input, img_rank, tensor_accessor); const auto axes = interpolate::get_axes<TRShape>(op, 2, has_axes_input, img_rank, tensor_accessor);
if (axes) { if (axes) {
output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin())); output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin()));

View File

@ -2,38 +2,39 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#pragma once #pragma once
#include <openvino/op/irdft.hpp>
#include "openvino/core/axis_vector.hpp" #include "openvino/core/axis_vector.hpp"
#include "openvino/op/irdft.hpp"
#include "rfft_common_validation.hpp" #include "rfft_common_validation.hpp"
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v9 { namespace v9 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const ov::op::v9::IRDFT* op, std::vector<TRShape> shape_infer(const IRDFT* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { using DimType = typename T::value_type;
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3));
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1);
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
const auto& axes_shape = input_shapes[1]; const auto& axes_shape = input_shapes[1];
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
std::vector<int64_t> axes;
bool axes_are_known = get_data_as_int64<T>(1, op, axes, constant_data); auto axes = get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
auto axes_are_known = static_cast<bool>(axes);
util::rfft_common_validation::shape_validation(op, util::rfft_common_validation::shape_validation(op,
input_shapes, input_shapes,
axes, *axes,
axes_are_known, axes_are_known,
util::rfft_common_validation::RFFTKind::Inverse); util::rfft_common_validation::RFFTKind::Inverse);
if (input_shape.rank().is_dynamic()) { if (input_shape.rank().is_dynamic()) {
output_shape = ov::PartialShape::dynamic(); output_shape = ov::PartialShape::dynamic();
return; return output_shapes;
} }
const auto input_rank = input_shape.size(); const auto input_rank = input_shape.size();
@ -45,34 +46,34 @@ void shape_infer(const ov::op::v9::IRDFT* op,
for (size_t i = 0; i < input_rank - 1; ++i) { for (size_t i = 0; i < input_rank - 1; ++i) {
output_shape[i] = ov::Dimension::dynamic(); output_shape[i] = ov::Dimension::dynamic();
} }
return; return output_shapes;
} }
const auto last_axis = axes.back(); const auto last_axis = axes->back();
if (input_shapes.size() == 2) { if (input_shapes.size() == 2) {
output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1)); output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1));
return; return output_shapes;
} }
const auto& signal_size_shape = input_shapes[2]; const auto& signal_size_shape = input_shapes[2];
std::vector<int64_t> signal_size; auto signal_size = get_input_const_data_as<TRShape, int64_t>(op, 2, ta);
bool status_signal_size = get_data_as_int64<T>(2, op, signal_size, constant_data);
if (signal_size_shape.rank().is_dynamic() || !status_signal_size) { if (signal_size_shape.rank().is_dynamic() || !signal_size) {
output_shape[last_axis] = ov::Dimension::dynamic(); output_shape[last_axis] = ov::Dimension::dynamic();
return; return output_shapes;
} }
size_t num_of_axes = axes.size(); size_t num_of_axes = axes->size();
for (size_t i = 0; i < num_of_axes; ++i) { for (size_t i = 0; i < num_of_axes; ++i) {
if (signal_size[i] != -1) { if ((*signal_size)[i] != -1) {
output_shape[axes[i]] = DimType(signal_size[i]); output_shape[(*axes)[i]] = DimType((*signal_size)[i]);
} }
} }
if (signal_size.back() == -1) { if (signal_size->back() == -1) {
output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1)); output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1));
} }
return output_shapes;
} }
} // namespace v9 } // namespace v9
} // namespace op } // namespace op
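
A worked instance of the rules above, with illustrative values: for a packed-complex input {4, 33, 2} with constant axes = {1} and no signal_size input, the last axis gives output[1] = 2 * (33 - 1) = 64, and the trailing complex-pair dimension of size 2 is dropped, so the inferred shape is {4, 64}.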

View File

@ -11,11 +11,18 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
namespace lstm_cell {
constexpr size_t gates_count = 4;
constexpr size_t num_state_nodes = 2;
constexpr size_t peepholes_count = 3;
} // namespace lstm_cell
template <class T> template <class T>
void shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<result_shape_t<T>> shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 7 && output_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 7);
constexpr auto num_state_nodes = 2;
output_shapes = rnn::cell_base_shape_infer(op, input_shapes, op->s_gates_count, num_state_nodes); auto output_shapes =
rnn::cell_base_shape_infer(op, input_shapes, lstm_cell::gates_count, lstm_cell::num_state_nodes);
const auto& hidden_size = output_shapes[0][1]; const auto& hidden_size = output_shapes[0][1];
if (hidden_size.is_dynamic()) { // set hidden_size based on attribute if (hidden_size.is_dynamic()) { // set hidden_size based on attribute
output_shapes[0][1] = op->get_hidden_size(); output_shapes[0][1] = op->get_hidden_size();
@ -24,34 +31,33 @@ void shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes, std::ve
const auto& p_pshape = input_shapes[6]; const auto& p_pshape = input_shapes[6];
if (p_pshape[0].is_static() && hidden_size.is_static()) { if (p_pshape[0].is_static() && hidden_size.is_static()) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
p_pshape[0].compatible(hidden_size * op->s_peepholes_count), p_pshape[0].compatible(hidden_size * 3),
"Parameter hidden_size mistmatched in P input. Current value is: ", "Parameter hidden_size mismatched in P input. Current value is: ",
p_pshape[0].get_length(), p_pshape[0].get_length(),
", expected: ", ", expected: ",
hidden_size.get_length() * op->s_peepholes_count, hidden_size.get_length() * 3,
"."); ".");
} }
return output_shapes;
} }
} // namespace v0 } // namespace v0
namespace v4 { namespace v4 {
namespace lstm_cell {
constexpr size_t gates_count = 4;
}
template <class TShape> template <class TShape>
std::vector<TShape> shape_infer(const LSTMCell* op, const std::vector<TShape>& input_shapes) { std::vector<result_shape_t<TShape>> shape_infer(const LSTMCell* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 6); NODE_VALIDATION_CHECK(op, input_shapes.size() == 6);
constexpr auto num_gates = 4;
constexpr auto num_state_nodes = 2; constexpr auto num_state_nodes = 2;
auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, lstm_cell::gates_count, num_state_nodes);
if (output_shapes[0][1].is_dynamic()) { // set hidden_size based on attribute if (output_shapes[0][1].is_dynamic()) { // set hidden_size based on attribute
output_shapes[0][1] = op->get_hidden_size(); output_shapes[0][1] = op->get_hidden_size();
output_shapes[1][1] = op->get_hidden_size(); output_shapes[1][1] = op->get_hidden_size();
} }
return output_shapes; return output_shapes;
} }
template <class T>
void shape_infer(const LSTMCell* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v4 } // namespace v4
} // namespace op } // namespace op
} // namespace ov } // namespace ov
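
A caller-side sketch for the v4 LSTMCell path (default-constructed op and header path are assumptions); with 4 gates and hidden_size 5, W, R and B carry a 20-row gate dimension:

#include <memory>
#include <vector>

#include "openvino/op/lstm_cell.hpp"
// plus the internal lstm_cell_shape_inference.hpp header (exact path assumed)

void lstm_cell_demo() {
    const auto op = std::make_shared<ov::op::v4::LSTMCell>();
    const std::vector<ov::PartialShape> input_shapes{{2, 4},   // X
                                                     {2, 5},   // H_t
                                                     {2, 5},   // C_t
                                                     {20, 4},  // W: 4 * hidden_size x input_size
                                                     {20, 5},  // R: 4 * hidden_size x hidden_size
                                                     {20}};    // B: 4 * hidden_size
    const auto output_shapes = ov::op::v4::shape_infer(op.get(), input_shapes);
    // two state outputs: output_shapes[0] == {2, 5} (H_out), output_shapes[1] == {2, 5} (C_out)
}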

View File

@ -9,7 +9,7 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape>
std::vector<TShape> shape_infer(const LSTMSequence* op, const std::vector<TShape>& input_shapes) { std::vector<result_shape_t<TShape>> shape_infer(const LSTMSequence* op, const std::vector<TShape>& input_shapes) {
constexpr auto num_gates = 4; constexpr auto num_gates = 4;
constexpr auto num_state_nodes = 2; constexpr auto num_state_nodes = 2;
const auto output_shapes = const auto output_shapes =
@ -33,7 +33,7 @@ std::vector<TShape> shape_infer(const LSTMSequence* op, const std::vector<TShape
} // namespace v0 } // namespace v0
namespace v5 { namespace v5 {
template <class TShape> template <class TShape>
std::vector<TShape> shape_infer(const LSTMSequence* op, const std::vector<TShape>& input_shapes) { std::vector<result_shape_t<TShape>> shape_infer(const LSTMSequence* op, const std::vector<TShape>& input_shapes) {
constexpr auto num_gates = 4; constexpr auto num_gates = 4;
constexpr auto num_state_nodes = 2; constexpr auto num_state_nodes = 2;
return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction()); return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction());

View File

@ -10,15 +10,16 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const ov::op::v0::MatMul* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const MatMul* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
auto arg0_shape = input_shapes[0], arg1_shape = input_shapes[1]; auto arg0_shape = input_shapes[0], arg1_shape = input_shapes[1];
if (arg0_shape.rank().is_dynamic() || arg1_shape.rank().is_dynamic()) { if (arg0_shape.rank().is_dynamic() || arg1_shape.rank().is_dynamic()) {
output_shapes[0] = ov::PartialShape::dynamic(); return {ov::PartialShape::dynamic()};
return;
} }
auto output_shapes = std::vector<TRShape>();
// ranks are known // ranks are known
const bool transpose_a = op->get_transpose_a(); const bool transpose_a = op->get_transpose_a();
const bool transpose_b = op->get_transpose_b(); const bool transpose_b = op->get_transpose_b();
@ -27,7 +28,7 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector<T>& input_shape
NODE_VALIDATION_CHECK(op, (arg0_rank != 0 && arg1_rank != 0), "Scalars are not supported as MatMul inputs."); NODE_VALIDATION_CHECK(op, (arg0_rank != 0 && arg1_rank != 0), "Scalars are not supported as MatMul inputs.");
// Temporary Dimension vectors to calculate output shape // Temporary Dimension vectors to calculate output shape
T arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape); TRShape arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape);
// 1. Applying transpositions specified by optional `transpose_a` and `transpose_b` // 1. Applying transpositions specified by optional `transpose_a` and `transpose_b`
// Only two right-most dimensions are swapped, other dimensions remain the same. // Only two right-most dimensions are swapped, other dimensions remain the same.
@ -58,7 +59,7 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector<T>& input_shape
// COL_INDEX_DIM of the first matrix has to match ROW_INDEX_DIM of the second matrix. // COL_INDEX_DIM of the first matrix has to match ROW_INDEX_DIM of the second matrix.
// Error is not thrown for dynamic dimensions bounds without intersection // Error is not thrown for dynamic dimensions bounds without intersection
// to ensure MatMul backward compatibility. // to ensure MatMul backward compatibility.
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; using DimType = typename T::value_type;
auto merged_dimension = DimType(); auto merged_dimension = DimType();
auto arg0_col_dim = arg0_shape_tmp[arg0_rank - 1]; auto arg0_col_dim = arg0_shape_tmp[arg0_rank - 1];
auto arg1_row_dim = arg1_shape_tmp[arg1_rank - 2]; auto arg1_row_dim = arg1_shape_tmp[arg1_rank - 2];
@ -116,7 +117,8 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector<T>& input_shape
// arg1 input temporary axis inserted at COL_INDEX_DIM is removed // arg1 input temporary axis inserted at COL_INDEX_DIM is removed
output_shape.erase(output_shape.begin() + output_shape.size() - 1); output_shape.erase(output_shape.begin() + output_shape.size() - 1);
} }
output_shapes[0] = output_shape; output_shapes.emplace_back(std::move(output_shape));
return output_shapes;
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
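
A caller-side sketch of the migrated MatMul inference (the Parameter plumbing is illustrative, not part of this patch):

#include <memory>
#include <vector>

#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
// plus the internal matmul_shape_inference.hpp header (exact path assumed)

void matmul_demo() {
    const auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, 3, 4});
    const auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, 5, 4});
    const auto matmul = std::make_shared<ov::op::v0::MatMul>(a, b, false, true);  // transpose_b

    const std::vector<ov::PartialShape> input_shapes{a->get_partial_shape(), b->get_partial_shape()};
    const auto output_shapes = ov::op::v0::shape_infer(matmul.get(), input_shapes);
    // batch dim broadcast-merged, inner dims reduced: output_shapes[0] == {-1, 3, 5}
}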

View File

@ -12,12 +12,11 @@ namespace ov {
namespace op { namespace op {
namespace v1 { namespace v1 {
template <class TShape, class TContainer> template <class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const MaxPool* op, std::vector<TRShape> shape_infer(const MaxPool* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
TContainer& pads_begin, TContainer& pads_begin,
TContainer& pads_end, TContainer& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
const auto dilations = Strides(op->get_kernel().size(), 1); const auto dilations = Strides(op->get_kernel().size(), 1);
@ -32,12 +31,11 @@ std::vector<TShape> shape_infer(const MaxPool* op,
} // namespace v1 } // namespace v1
namespace v8 { namespace v8 {
template <class TShape, class TContainer> template <class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const MaxPool* op, std::vector<TRShape> shape_infer(const MaxPool* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
TContainer& pads_begin, TContainer& pads_begin,
TContainer& pads_end, TContainer& pads_end) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];

View File

@ -4,25 +4,23 @@
#pragma once #pragma once
#include <ngraph/validation_util.hpp>
#include <openvino/op/non_max_suppression.hpp>
#include <vector> #include <vector>
#include "openvino/core/validation_util.hpp"
#include "openvino/op/non_max_suppression.hpp"
#include "utils.hpp" #include "utils.hpp"
using namespace ngraph;
namespace ov { namespace ov {
namespace op { namespace op {
namespace v9 { namespace v9 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const NonMaxSuppression* op, void shape_infer(const NonMaxSuppression* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, std::vector<T>& output_shapes,
bool static_output = false, bool static_output = false,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
// this shape_infer differs from all the other - it is used in GPU during compile-time and infer-time in custom code // this shape_infer differs from all the other - it is used in GPU during compile-time and infer-time in custom code

NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 3);
const auto& boxes_ps = input_shapes[0]; const auto& boxes_ps = input_shapes[0];
@ -66,17 +64,16 @@ void shape_infer(const NonMaxSuppression* op,
// NonMaxSuppression produces triplets // NonMaxSuppression produces triplets
// that have the following format: [batch_index, class_index, box_index] // that have the following format: [batch_index, class_index, box_index]
ov::PartialShape out_shape = {Dimension::dynamic(), 3}; TRShape out_shape = {Dimension::dynamic(), 3};
if (boxes_ps.rank().is_static() && scores_ps.rank().is_static()) { if (boxes_ps.rank().is_static() && scores_ps.rank().is_static()) {
const auto num_boxes_boxes = boxes_ps[1]; const auto num_boxes_boxes = boxes_ps[1];
if (num_boxes_boxes.get_max_length() != -1 && scores_ps[0].get_max_length() != -1 && if (num_boxes_boxes.get_max_length() != -1 && scores_ps[0].get_max_length() != -1 &&
scores_ps[1].get_max_length() != -1) { scores_ps[1].get_max_length() != -1) {
const auto num_boxes = num_boxes_boxes.get_max_length(); const auto num_boxes = num_boxes_boxes.get_max_length();
const auto num_classes = scores_ps[1].get_max_length(); const auto num_classes = scores_ps[1].get_max_length();
std::vector<int64_t> max_output_boxes_per_class_as_vals;
if ((op->get_input_size() > 2 || constant_data.count(2)) && if (auto max_output_boxes_per_class_as_vals = get_input_const_data_as<TRShape, int64_t>(op, 2, ta)) {
get_data_as_int64<T>(2, op, max_output_boxes_per_class_as_vals, constant_data)) { int64_t max_output_boxes_per_class = (*max_output_boxes_per_class_as_vals)[0];
int64_t max_output_boxes_per_class = max_output_boxes_per_class_as_vals[0];
out_shape[0] = static_output ? std::min(num_boxes, max_output_boxes_per_class) * num_classes * out_shape[0] = static_output ? std::min(num_boxes, max_output_boxes_per_class) * num_classes *
scores_ps[0].get_max_length() scores_ps[0].get_max_length()
: Dimension(0, : Dimension(0,

View File

@ -2,63 +2,28 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#pragma once #pragma once
#include <openvino/core/validation_util.hpp>
#include <openvino/op/one_hot.hpp>
#include "openvino/op/one_hot.hpp"
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace util {
template <class T>
struct GetNotNegative {
const Node* m_op;
GetNotNegative(const Node* op) : m_op{op} {}
template <class V>
T operator()(const V v) const {
NODE_VALIDATION_CHECK(m_op, cmp::ge(v, 0), "OneHot depth value can't be negative.");
return static_cast<T>(v);
}
};
} // namespace util
namespace v1 { namespace v1 {
namespace utils {
namespace one_hot {
OPENVINO_SUPPRESS_DEPRECATED_START
template <class TShape>
inline bool get_data_as_shape_and_validate_sign(
size_t idx,
const ov::Node* op,
TShape& shape,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data) {
if (constant_data.count(idx)) {
using DimType = typename TShape::value_type;
const auto data = host_tensor_2_vector<int64_t>(constant_data.at(idx));
shape.clear();
std::transform(data.cbegin(), data.cend(), std::back_inserter(shape), [&](int64_t v) {
NODE_VALIDATION_CHECK(op, v >= 0, "OneHot depth value can't be negative.");
return static_cast<DimType>(v);
});
return true;
} else {
return get_data_as_shape<TShape>(idx, op, shape, constant_data);
}
}
template <>
inline bool get_data_as_shape_and_validate_sign<ov::PartialShape>(
size_t idx,
const ov::Node* op,
ov::PartialShape& shape,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data) {
if (constant_data.count(idx)) {
const auto data = host_tensor_2_vector<int64_t>(constant_data.at(idx));
for (const auto& value : data) {
NODE_VALIDATION_CHECK(op, value >= 0, "OneHot depth value can't be negative.");
}
shape = PartialShape(data);
return true;
} else {
OPENVINO_SUPPRESS_DEPRECATED_START
return ov::evaluate_as_partial_shape(op->input_value(idx), shape);
OPENVINO_SUPPRESS_DEPRECATED_END
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace one_hot
} // namespace utils
void inline resolve_axis(OneHot* op) { void inline resolve_axis(OneHot* op) {
if (op->get_input_size() < 1) { if (op->get_input_size() < 1) {
return; return;
@ -72,13 +37,12 @@ void inline resolve_axis(OneHot* op) {
} }
} }
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const OneHot* op, std::vector<TRShape> shape_infer(const OneHot* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1); using DimType = typename T::value_type;
using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
const auto& indices_shape = input_shapes[0]; const auto& indices_shape = input_shapes[0];
const auto& depth_shape = input_shapes[1]; const auto& depth_shape = input_shapes[1];
const auto& on_value_shape = input_shapes[2]; const auto& on_value_shape = input_shapes[2];
@ -96,6 +60,7 @@ void shape_infer(const OneHot* op,
off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()), off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()),
"off_value input must be scalar."); "off_value input must be scalar.");
auto output_shapes = std::vector<TRShape>(1);
auto& result_shape = output_shapes[0]; auto& result_shape = output_shapes[0];
if (indices_shape.rank().is_static()) { if (indices_shape.rank().is_static()) {
result_shape = indices_shape; result_shape = indices_shape;
@ -104,16 +69,18 @@ void shape_infer(const OneHot* op,
const auto axis = ov::normalize_axis(op, op->get_axis(), indices_rank + 1, -indices_rank - 1, indices_rank); const auto axis = ov::normalize_axis(op, op->get_axis(), indices_rank + 1, -indices_rank - 1, indices_rank);
OPENVINO_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
T depth_dim_as_shape; auto depth_as_shape =
if (utils::one_hot::get_data_as_shape_and_validate_sign<T>(1, op, depth_dim_as_shape, constant_data) && get_input_const_data_as_shape<TRShape>(op, 1, ta, util::GetNotNegative<typename DimType::value_type>(op));
depth_dim_as_shape.size() == 1) {
result_shape.insert(result_shape.begin() + axis, depth_dim_as_shape[0]); if (depth_as_shape && depth_as_shape->size() == 1) {
result_shape.insert(result_shape.begin() + axis, (*depth_as_shape)[0]);
} else { } else {
result_shape.insert(result_shape.begin() + axis, DimType()); result_shape.insert(result_shape.begin() + axis, DimType());
} }
} else { } else {
result_shape = PartialShape::dynamic(); result_shape = PartialShape::dynamic();
} }
return output_shapes;
} }
} // namespace v1 } // namespace v1
} // namespace op } // namespace op
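
The inline validators deleted above are folded into the reusable util::GetNotNegative functor, which checks the sign while converting to the target type. A standalone sketch (the node is only used for error context; header path assumed):

#include <memory>

#include "openvino/op/one_hot.hpp"
// util::GetNotNegative from one_hot_shape_inference.hpp (exact path assumed)

void depth_check_demo() {
    const auto node = std::make_shared<ov::op::v1::OneHot>();
    const ov::op::util::GetNotNegative<size_t> to_dim{node.get()};
    const auto depth = to_dim(int64_t{4});  // fine: returns size_t{4}
    // to_dim(int64_t{-1}) would raise NodeValidationFailure:
    // "OneHot depth value can't be negative."
    (void)depth;
}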

View File

@ -25,13 +25,13 @@ public:
optional(const optional<T>& other) : m_has_value{other.m_has_value}, m_opt{} { optional(const optional<T>& other) : m_has_value{other.m_has_value}, m_opt{} {
if (other.m_has_value) { if (other.m_has_value) {
construct(*other); create(*other);
} }
} }
optional(optional<T>&& other) noexcept : m_has_value{other.m_has_value}, m_opt{} { optional(optional<T>&& other) noexcept : m_has_value{other.m_has_value}, m_opt{} {
if (other.m_has_value) { if (other.m_has_value) {
construct(std::move(*other)); create(std::move(*other));
} }
} }
@ -40,13 +40,8 @@ public:
} }
optional& operator=(const optional& other) { optional& operator=(const optional& other) {
if (other.m_has_value) { if (other) {
if (m_has_value) { *this = *other;
m_opt.m_value = *other;
} else {
construct(*other);
}
m_has_value = true;
} else { } else {
reset(); reset();
} }
@ -54,19 +49,24 @@ public:
} }
optional& operator=(optional&& other) noexcept { optional& operator=(optional&& other) noexcept {
if (other.m_has_value) { if (other) {
if (m_has_value) { *this = std::move(*other);
m_opt.m_value = std::move(*other);
} else {
construct(std::move(*other));
}
m_has_value = true;
} else { } else {
reset(); reset();
} }
return *this; return *this;
} }
template <class U = T>
optional& operator=(U&& value) {
if (m_has_value) {
m_opt.m_value = std::forward<U>(value);
} else {
emplace(std::forward<U>(value));
}
return *this;
}
constexpr operator bool() const { constexpr operator bool() const {
return m_has_value; return m_has_value;
} }
@ -84,7 +84,7 @@ public:
} }
T&& operator*() && noexcept { T&& operator*() && noexcept {
return m_opt.m_value; return std::move(m_opt.m_value);
} }
constexpr const T* operator->() const noexcept { constexpr const T* operator->() const noexcept {
@ -102,9 +102,15 @@ public:
} }
} }
template <class... Args>
void emplace(Args&&... args) {
create(std::forward<Args>(args)...);
m_has_value = true;
}
private: private:
template <class... Args> template <class... Args>
void construct(Args&&... args) { void create(Args&&... args) {
new (std::addressof(m_opt)) T(std::forward<Args>(args)...); new (std::addressof(m_opt)) T(std::forward<Args>(args)...);
} }
@ -123,7 +129,7 @@ private:
~Storage() {} ~Storage() {}
}; };
bool m_has_value; bool m_has_value = false;
Storage<T> m_opt; Storage<T> m_opt{};
}; };
} // namespace ov } // namespace ov
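
Taken together, the additions give ov::optional value assignment and in-place construction on top of the existing observers; a minimal usage sketch (header path assumed):

#include <cassert>
#include <utility>

#include "openvino/core/shape.hpp"
// ov::optional from ov_optional.hpp (exact path assumed)

void optional_demo() {
    ov::optional<ov::Shape> maybe_shape;        // disengaged; m_has_value now defaults to false
    assert(!maybe_shape);

    maybe_shape.emplace(ov::Shape{1, 2});       // new emplace(): construct in place and engage
    maybe_shape = ov::Shape{3, 4};              // new operator=(U&&): assign over the live value
    maybe_shape->push_back(5);                  // stored value is now {3, 4, 5}

    ov::Shape taken = *std::move(maybe_shape);  // rvalue operator*() actually moves since this patch
    maybe_shape.reset();                        // destroy and disengage
    (void)taken;
}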

View File

@ -11,11 +11,10 @@
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace util { template <class TShape, class TRShape = result_shape_t<TShape>>
template <class TShape> std::vector<TRShape> shape_infer(const util::PadBase* op,
std::vector<TShape> shape_infer(const PadBase* op, const std::vector<TShape>& input_shapes,
const std::vector<TShape>& input_shapes, const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || input_shapes.size() == 4);
const auto& pad_mode = op->get_pad_mode(); const auto& pad_mode = op->get_pad_mode();
@ -49,9 +48,10 @@ std::vector<TShape> shape_infer(const PadBase* op,
const auto& arg_shape = input_shapes[0]; const auto& arg_shape = input_shapes[0];
const auto& arg_shape_rank = arg_shape.rank(); const auto& arg_shape_rank = arg_shape.rank();
TShape output_shape; auto output_shapes = std::vector<TRShape>(1);
const auto pads_begin_coord = get_input_bounds<TShape, int64_t>(op, 1, constant_data); auto& output_shape = output_shapes[0];
const auto pads_end_coord = get_input_bounds<TShape, int64_t>(op, 2, constant_data); const auto pads_begin_coord = get_input_bounds<TRShape, int64_t>(op, 1, tensor_accessor);
const auto pads_end_coord = get_input_bounds<TRShape, int64_t>(op, 2, tensor_accessor);
if (arg_shape_rank.is_static()) { if (arg_shape_rank.is_static()) {
const auto arg_rank_len = arg_shape_rank.get_length(); const auto arg_rank_len = arg_shape_rank.get_length();
@ -135,33 +135,11 @@ std::vector<TShape> shape_infer(const PadBase* op,
")."); ").");
output_shape.resize(arg_shape_rank.get_length()); output_shape.resize(arg_shape_rank.get_length());
} }
return {output_shape};
} else { } else {
return {PartialShape::dynamic()}; output_shape = PartialShape::dynamic();
} }
}
} // namespace util return output_shapes;
namespace v1 {
template <class TShape>
void shape_infer(const Pad* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = op::util::shape_infer(op, input_shapes, constant_data);
} }
} // namespace v1
namespace v12 {
template <class TShape>
void shape_infer(const Pad* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = op::util::shape_infer(op, input_shapes, constant_data);
}
} // namespace v12
} // namespace op } // namespace op
} // namespace ov } // namespace ov
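
With the per-version wrappers gone, v1 and v12 callers invoke the shared function directly; a minimal sketch (the default-constructed op, its CONSTANT default mode, and the header path are assumptions):

#include <memory>
#include <vector>

#include "openvino/op/pad.hpp"
// plus the internal pad_shape_inference.hpp header (exact path assumed)

void pad_demo() {
    const auto pad = std::make_shared<ov::op::v1::Pad>();  // CONSTANT mode by default (assumed)
    const std::vector<ov::PartialShape> input_shapes{{2, 3},  // arg
                                                     {2},     // pads_begin
                                                     {2},     // pads_end
                                                     {}};     // scalar pad value
    const auto output_shapes = ov::op::shape_infer(pad.get(), input_shapes);
    // no accessor entries, so the pad amounts stay unknown:
    // output_shapes[0] has rank 2 with dynamic dimensions
}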

View File

@ -108,7 +108,7 @@ void apply_padding(const TOp* op,
pads_begin.reserve(num_spatial); pads_begin.reserve(num_spatial);
pads_end.reserve(num_spatial); pads_end.reserve(num_spatial);
auto data_dim = data_shape.cbegin() + spatial_dim_offset; auto data_dim = &data_shape[spatial_dim_offset];
auto pad_b = auto_pad == PadType::SAME_UPPER ? pads_begin.begin() : pads_end.begin(); auto pad_b = auto_pad == PadType::SAME_UPPER ? pads_begin.begin() : pads_end.begin();
auto pad_e = auto_pad == PadType::SAME_UPPER ? pads_end.begin() : pads_begin.begin(); auto pad_e = auto_pad == PadType::SAME_UPPER ? pads_end.begin() : pads_begin.begin();
@ -163,13 +163,13 @@ void valid_dilated_kernel_with_padding(const TOp* op,
* @param dilations Kernel dilations. * @param dilations Kernel dilations.
* @param out_shape Output shape for appending the spatial shape of pooling * @param out_shape Output shape for appending the spatial shape of pooling
*/ */
template <class TOp, class TShape, class TContainer> template <class TOp, class TShape, class TContainer, class TRShape>
void append_spatial_shape(const TOp* op, void append_spatial_shape(const TOp* op,
const TShape& data_shape, const TShape& data_shape,
const TContainer& pads_begin, const TContainer& pads_begin,
const TContainer& pads_end, const TContainer& pads_end,
const Strides& dilations, const Strides& dilations,
TShape& out_shape) { TRShape& out_shape) {
using namespace ov::util; using namespace ov::util;
const auto spatial_num = data_shape.size() - spatial_dim_offset; const auto spatial_num = data_shape.size() - spatial_dim_offset;
const auto is_ceil_mode = op->get_rounding_type() == RoundingType::CEIL; const auto is_ceil_mode = op->get_rounding_type() == RoundingType::CEIL;
@ -178,7 +178,7 @@ void append_spatial_shape(const TOp* op,
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
const auto& dim_divide = is_ceil_mode ? dim::ceil_div<TDim> : dim::floor_div<TDim>; const auto& dim_divide = is_ceil_mode ? dim::ceil_div<TDim> : dim::floor_div<TDim>;
auto data_dim = data_shape.cbegin() + spatial_dim_offset; auto data_dim = &data_shape[spatial_dim_offset];
const auto& kernel = op->get_kernel(); const auto& kernel = op->get_kernel();
const auto& stride = op->get_strides(); const auto& stride = op->get_strides();
@ -207,14 +207,14 @@ void append_spatial_shape(const TOp* op,
/** /**
* @brief Shape inference helper used for pooling operators such Max Pool, Avg Pool. * @brief Shape inference helper used for pooling operators such Max Pool, Avg Pool.
*/ */
template <class TOp, class TShape, class TContainer> template <class TOp, class TShape, class TContainer, class TRShape = result_shape_t<TShape>>
TShape out_shape_infer(const TOp* op, TRShape out_shape_infer(const TOp* op,
const TShape& data_shape, const TShape& data_shape,
const TContainer& pads_begin, const TContainer& pads_begin,
const TContainer& pads_end, const TContainer& pads_end,
const Strides& dilations) { const Strides& dilations) {
const auto out_rank_size = spatial_dim_offset + op->get_kernel().size(); const auto out_rank_size = spatial_dim_offset + op->get_kernel().size();
TShape out_shape; TRShape out_shape;
if (data_shape.rank().is_static()) { if (data_shape.rank().is_static()) {
const auto& batch_size = data_shape[0]; const auto& batch_size = data_shape[0];
const auto& channel_count = data_shape[1]; const auto& channel_count = data_shape[1];
@ -239,11 +239,10 @@ TShape out_shape_infer(const TOp* op,
*/ */
template <class TShape, template <class TShape,
class TOp, class TOp,
class TRShape = result_shape_t<TShape>,
typename std::enable_if<std::is_same<TOp, v8::AdaptiveAvgPool>::value || typename std::enable_if<std::is_same<TOp, v8::AdaptiveAvgPool>::value ||
std::is_same<TOp, v8::AdaptiveMaxPool>::value>::type* = nullptr> std::is_same<TOp, v8::AdaptiveMaxPool>::value>::type* = nullptr>
TShape out_shape_infer(const TOp* op, TRShape out_shape_infer(const TOp* op, const std::vector<TShape>& input_shapes, const ITensorAccessor& ta) {
const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
@ -258,7 +257,7 @@ TShape out_shape_infer(const TOp* op,
data_shape); data_shape);
OPENVINO_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
TShape output_shape; TRShape output_shape;
if (data_rank.is_static()) { if (data_rank.is_static()) {
auto num_of_spatial_dims = data_shape.size() - spatial_dim_offset; auto num_of_spatial_dims = data_shape.size() - spatial_dim_offset;
@ -270,7 +269,7 @@ TShape out_shape_infer(const TOp* op,
output_shape.reserve(data_shape.size()); output_shape.reserve(data_shape.size());
std::copy_n(data_shape.begin(), spatial_dim_offset, std::back_inserter(output_shape)); std::copy_n(data_shape.begin(), spatial_dim_offset, std::back_inserter(output_shape));
if (const auto spatial_dims = get_input_const_data_as_shape<TShape>(op, 1, constant_data)) { if (const auto spatial_dims = get_input_const_data_as_shape<TRShape>(op, 1, ta)) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
num_of_spatial_dims == spatial_dims->size(), num_of_spatial_dims == spatial_dims->size(),
"Number of spatial dimensions is not compatible with input data rank"); "Number of spatial dimensions is not compatible with input data rank");

View File

@ -9,19 +9,11 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const PriorBoxClustered* const op, std::vector<TRShape> shape_infer(const PriorBoxClustered* const op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
return prior_box::shape_infer(op, input_shapes, constant_data); return prior_box::shape_infer(op, input_shapes, ta);
}
template <class TShape>
void shape_infer(const PriorBoxClustered* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = prior_box::shape_infer(op, input_shapes, constant_data);
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -9,28 +9,20 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const PriorBox* const op, std::vector<TRShape> shape_infer(const PriorBox* const op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
return prior_box::shape_infer(op, input_shapes, constant_data); return prior_box::shape_infer(op, input_shapes, ta);
} }
} // namespace v0 } // namespace v0
namespace v8 { namespace v8 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const PriorBox* const op, std::vector<TRShape> shape_infer(const PriorBox* const op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
return prior_box::shape_infer(op, input_shapes, constant_data); return prior_box::shape_infer(op, input_shapes, ta);
}
template <class TShape>
void shape_infer(const PriorBox* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = prior_box::shape_infer(op, input_shapes, constant_data);
} }
} // namespace v8 } // namespace v8

View File

@ -46,10 +46,10 @@ TDim number_of_priors(const v0::PriorBoxClustered* const op) {
return {static_cast<typename TDim::value_type>(op->get_attrs().widths.size())}; return {static_cast<typename TDim::value_type>(op->get_attrs().widths.size())};
} }
template <class TOp, class TShape> template <class TOp, class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const TOp* const op, std::vector<TRShape> shape_infer(const TOp* const op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data) { const ITensorAccessor& ta = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
auto out_size_rank = input_shapes[0].rank(); auto out_size_rank = input_shapes[0].rank();
@ -62,12 +62,12 @@ std::vector<TShape> shape_infer(const TOp* const op,
img_size_rank, img_size_rank,
" and both must be 1-D"); " and both must be 1-D");
auto output_shapes = std::vector<TShape>(1, TShape{2}); auto output_shapes = std::vector<TRShape>(1, TRShape{2});
if (auto out_size = get_input_const_data_as_shape<TShape>(op, 0, constant_data)) { if (auto out_size = get_input_const_data_as_shape<TRShape>(op, 0, ta)) {
NODE_VALIDATION_CHECK(op, out_size->size() == 2, "Output size must have two elements. Got: ", out_size->size()); NODE_VALIDATION_CHECK(op, out_size->size() == 2, "Output size must have two elements. Got: ", out_size->size());
using TDim = typename TShape::value_type; using TDim = typename TRShape::value_type;
const auto num_of_priors = prior_box::number_of_priors<TDim>(op); const auto num_of_priors = prior_box::number_of_priors<TDim>(op);
output_shapes.front().push_back((*out_size)[0] * (*out_size)[1] * num_of_priors * 4); output_shapes.front().push_back((*out_size)[0] * (*out_size)[1] * num_of_priors * 4);
} else { } else {

View File

@ -11,9 +11,9 @@
namespace ov { namespace ov {
namespace op { namespace op {
namespace proposal { namespace proposal {
template <class TOp, class TShape> template <class TOp, class TShape, class TRShape = result_shape_t<TShape>>
TShape shape_infer_boxes(const TOp* op, const std::vector<TShape>& input_shapes) { TRShape shape_infer_boxes(const TOp* op, const std::vector<TShape>& input_shapes) {
using TDim = typename TShape::value_type; using TDim = typename TRShape::value_type;
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const auto& class_probs_ps = input_shapes[0]; const auto& class_probs_ps = input_shapes[0];
@ -43,7 +43,7 @@ TShape shape_infer_boxes(const TOp* op, const std::vector<TShape>& input_shapes)
const auto is_bbox_rank_dynamic = bbox_deltas_ps.rank().is_dynamic(); const auto is_bbox_rank_dynamic = bbox_deltas_ps.rank().is_dynamic();
TShape proposed_boxes_shape; TRShape proposed_boxes_shape;
proposed_boxes_shape.reserve(2); proposed_boxes_shape.reserve(2);
if (class_probs_ps.rank().is_static()) { if (class_probs_ps.rank().is_static()) {
@ -78,8 +78,8 @@ TShape shape_infer_boxes(const TOp* op, const std::vector<TShape>& input_shapes)
} // namespace proposal } // namespace proposal
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Proposal* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const Proposal* op, const std::vector<TShape>& input_shapes) {
return {ov::op::proposal::shape_infer_boxes(op, input_shapes)}; return {ov::op::proposal::shape_infer_boxes(op, input_shapes)};
} }
} // namespace v0 } // namespace v0
@ -89,9 +89,9 @@ std::vector<TShape> shape_infer(const Proposal* op, const std::vector<TShape>& i
namespace ov { namespace ov {
namespace op { namespace op {
namespace v4 { namespace v4 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Proposal* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const Proposal* op, const std::vector<TShape>& input_shapes) {
auto output_shapes = std::vector<TShape>(2, ov::op::proposal::shape_infer_boxes(op, input_shapes)); auto output_shapes = std::vector<TRShape>(2, ov::op::proposal::shape_infer_boxes(op, input_shapes));
output_shapes[1].resize(1); output_shapes[1].resize(1);
return output_shapes; return output_shapes;
} }
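
The v4 overload reuses shape_infer_boxes for both outputs and then truncates the copy to rank 1, so the two results stay consistent by construction. In concrete shapes (batch and post-NMS count illustrative):

// For batch_size = 2 and post_nms_topn = 300, shape_infer_boxes yields {600, 5}.
auto output_shapes = std::vector<ov::PartialShape>(2, ov::PartialShape{600, 5});
output_shapes[1].resize(1);
// output_shapes[0] == {600, 5}  (proposed boxes)
// output_shapes[1] == {600}     (probabilities: resize(1) keeps only the first dim)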

View File

@ -64,8 +64,8 @@ void mode_attr(const TROIPooling* op) {
} // namespace psroi_pooling } // namespace psroi_pooling
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const PSROIPooling* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const PSROIPooling* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
using namespace ov::util; using namespace ov::util;
@ -80,19 +80,15 @@ std::vector<TShape> shape_infer(const PSROIPooling* op, const std::vector<TShape
psroi_pooling::validate::feat_input_shape(op, feat_shape); psroi_pooling::validate::feat_input_shape(op, feat_shape);
roi_pooling::validate::rois_input_shape(op, rois_shape); roi_pooling::validate::rois_input_shape(op, rois_shape);
TShape out_shape; auto output_shapes = std::vector<TRShape>(1);
auto& out_shape = output_shapes.front();
out_shape.reserve(4); out_shape.reserve(4);
out_shape.emplace_back(rois_shape.rank().is_static() ? rois_shape[0] : dim::inf_bound); out_shape.emplace_back(rois_shape.rank().is_static() ? rois_shape[0] : dim::inf_bound);
out_shape.emplace_back(op->get_output_dim()); out_shape.emplace_back(op->get_output_dim());
out_shape.insert(out_shape.end(), 2, op->get_group_size()); out_shape.insert(out_shape.end(), 2, op->get_group_size());
return {out_shape}; return output_shapes;
}
template <class TShape>
void shape_infer(const PSROIPooling* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -13,76 +13,41 @@ namespace op {
namespace ShapeInferRange { namespace ShapeInferRange {
template <class T> template <class T, class TRShape = result_shape_t<T>>
inline bool get_data_as_double( std::vector<TRShape> range_shape_infer(const Node* op,
size_t idx, const std::vector<T>& input_shapes,
const ov::Node* op, bool output_is_integral,
std::vector<double>& axes_value, bool step_allows_zero,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { const ITensorAccessor& tensor_accessor) {
if (constant_data.count(idx)) { NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3));
axes_value = ov::opset1::Constant(constant_data.at(idx)).cast_vector<double>();
} else {
const auto& constant = ov::as_type_ptr<ov::opset1::Constant>(op->get_input_node_shared_ptr(idx));
NODE_VALIDATION_CHECK(op, constant != nullptr, "Static shape inference lacks constant data on port ", idx);
axes_value = constant->cast_vector<double>();
}
return true;
}
template <>
inline bool get_data_as_double<ov::PartialShape>(
size_t idx,
const ov::Node* op,
std::vector<double>& axes_value,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data) {
if (constant_data.count(idx)) {
axes_value = ov::opset1::Constant(constant_data.at(idx)).cast_vector<double>();
OPENVINO_SUPPRESS_DEPRECATED_START
} else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) {
OPENVINO_SUPPRESS_DEPRECATED_END
axes_value = constant->cast_vector<double>();
} else {
return false;
}
return true;
}
template <class T>
void range_shape_infer(const Node* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
bool output_is_integral,
bool step_allows_zero,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data) {
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3) && output_shapes.size() == 1);
NODE_VALIDATION_CHECK(op, input_shapes[0].rank().compatible(0), "'start' input is not a scalar"); NODE_VALIDATION_CHECK(op, input_shapes[0].rank().compatible(0), "'start' input is not a scalar");
NODE_VALIDATION_CHECK(op, input_shapes[1].rank().compatible(0), "'stop' input is not a scalar"); NODE_VALIDATION_CHECK(op, input_shapes[1].rank().compatible(0), "'stop' input is not a scalar");
NODE_VALIDATION_CHECK(op, input_shapes[2].rank().compatible(0), "'step' input is not a scalar"); NODE_VALIDATION_CHECK(op, input_shapes[2].rank().compatible(0), "'step' input is not a scalar");
std::vector<double> start_val; const auto start_val = get_input_const_data_as<TRShape, double>(op, 0, tensor_accessor);
std::vector<double> stop_val; const auto stop_val = get_input_const_data_as<TRShape, double>(op, 1, tensor_accessor);
std::vector<double> step_val; const auto step_val = get_input_const_data_as<TRShape, double>(op, 2, tensor_accessor);
double start = 0; double start = 0;
double stop = 0; double stop = 0;
double step = 0; double step = 0;
if (get_data_as_double<T>(0, op, start_val, constant_data)) { if (start_val) {
NODE_VALIDATION_CHECK(op, start_val.size() == 1); NODE_VALIDATION_CHECK(op, start_val->size() == 1);
start = start_val[0]; start = (*start_val)[0];
NODE_VALIDATION_CHECK(op, std::isfinite(start) && !std::isnan(start), "'start' cannot be nan or infinite."); NODE_VALIDATION_CHECK(op, std::isfinite(start) && !std::isnan(start), "'start' cannot be nan or infinite.");
} }
if (get_data_as_double<T>(1, op, stop_val, constant_data)) { if (stop_val) {
NODE_VALIDATION_CHECK(op, stop_val.size() == 1); NODE_VALIDATION_CHECK(op, stop_val->size() == 1);
stop = stop_val[0]; stop = (*stop_val)[0];
NODE_VALIDATION_CHECK(op, std::isfinite(stop) && !std::isnan(stop), "'stop' cannot be nan or infinite."); NODE_VALIDATION_CHECK(op, std::isfinite(stop) && !std::isnan(stop), "'stop' cannot be nan or infinite.");
} }
if (get_data_as_double<T>(2, op, step_val, constant_data)) { if (step_val) {
NODE_VALIDATION_CHECK(op, step_val.size() == 1); NODE_VALIDATION_CHECK(op, step_val->size() == 1);
step = step_val[0]; step = (*step_val)[0];
if (step_allows_zero) if (step_allows_zero)
NODE_VALIDATION_CHECK(op, std::isfinite(step) && !std::isnan(step), "'step' cannot be nan or infinite."); NODE_VALIDATION_CHECK(op, std::isfinite(step) && !std::isnan(step), "'step' cannot be nan or infinite.");
else else
@ -91,7 +56,8 @@ void range_shape_infer(const Node* op,
"'step' cannot be zero, nan, or infinite."); "'step' cannot be zero, nan, or infinite.");
} }
if (start_val.size() == 1 && stop_val.size() == 1 && step_val.size() == 1) { auto output_shapes = std::vector<TRShape>(1);
if (start_val && stop_val && step_val) {
// all inputs must be casted to output_type before // all inputs must be casted to output_type before
// the rounding for casting values are done towards zero // the rounding for casting values are done towards zero
if (output_is_integral) { if (output_is_integral) {
@ -110,45 +76,38 @@ void range_shape_infer(const Node* op,
double strided = ceil(fabs(span) / fabs(step)); double strided = ceil(fabs(span) / fabs(step));
output_shapes[0] = T{static_cast<uint32_t>(strided)}; output_shapes[0] = TRShape{static_cast<uint32_t>(strided)};
} else { } else {
output_shapes[0] = ov::PartialShape::dynamic(1); output_shapes[0] = ov::PartialShape::dynamic(1);
} }
return output_shapes;
} }
} // namespace ShapeInferRange } // namespace ShapeInferRange
namespace v0 { namespace v0 {
template <class T, class TRShape = result_shape_t<T>>
template <class T> std::vector<TRShape> shape_infer(const Range* op,
void shape_infer(const Range* op, const std::vector<T>& input_shapes,
const std::vector<T>& input_shapes, const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
std::vector<T>& output_shapes, return ShapeInferRange::range_shape_infer(op,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { input_shapes,
ShapeInferRange::range_shape_infer(op, op->get_input_element_type(0).is_integral_number(),
input_shapes, false,
output_shapes, tensor_accessor);
op->get_input_element_type(0).is_integral_number(),
false,
constant_data);
} }
} // namespace v0 } // namespace v0
namespace v4 { namespace v4 {
template <class T, class TRShape = result_shape_t<T>>
template <class T> std::vector<TRShape> shape_infer(const Range* op,
void shape_infer(const Range* op, const std::vector<T>& input_shapes,
const std::vector<T>& input_shapes, const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
std::vector<T>& output_shapes, return ShapeInferRange::range_shape_infer(op,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { input_shapes,
ShapeInferRange::range_shape_infer(op, op->get_output_type().is_integral_number(),
input_shapes, true,
output_shapes, tensor_accessor);
op->get_output_type().is_integral_number(),
true,
constant_data);
} }
} // namespace v4 } // namespace v4
} // namespace op } // namespace op
} // namespace ov } // namespace ov
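
When start, stop, and step are all known, the output length reduces to ceil(|stop - start| / |step|) on a sign-consistent span (values are first cast toward zero for integral outputs). A standalone recomputation of that rule, as a sketch with the empty-range handling approximated:

#include <cmath>
#include <cstdint>

uint32_t range_length(double start, double stop, double step) {
    const double span = stop - start;
    // A zero span, or span and step with opposite signs, produce no elements.
    if (span == 0.0 || (span < 0.0) != (step < 0.0))
        return 0;
    return static_cast<uint32_t>(std::ceil(std::fabs(span) / std::fabs(step)));
}
// range_length(0, 10, 3) == 4, so v0/v4 Range infers output shape {4}.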

View File

@ -24,29 +24,28 @@ DimType get_rdft_output_dimension(DimType d) {
return DimType(get_ouput_dimension_bound(d.get_min_length()), get_ouput_dimension_bound(d.get_max_length())); return DimType(get_ouput_dimension_bound(d.get_min_length()), get_ouput_dimension_bound(d.get_max_length()));
} }
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const ov::op::v9::RDFT* op, std::vector<TRShape> shape_infer(const RDFT* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { using DimType = typename T::value_type;
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3));
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1);
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
const auto& axes_shape = input_shapes[1]; const auto& axes_shape = input_shapes[1];
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
std::vector<int64_t> axes; auto axes = get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
bool axes_are_known = get_data_as_int64<T>(1, op, axes, constant_data);
util::rfft_common_validation::shape_validation(op, util::rfft_common_validation::shape_validation(op,
input_shapes, input_shapes,
axes, *axes,
axes_are_known, static_cast<bool>(axes),
util::rfft_common_validation::RFFTKind::Forward); util::rfft_common_validation::RFFTKind::Forward);
if (input_shape.rank().is_dynamic()) { if (input_shape.rank().is_dynamic()) {
output_shape = ov::PartialShape::dynamic(); output_shape = ov::PartialShape::dynamic();
return; return output_shapes;
} }
output_shape = input_shape; output_shape = input_shape;
@ -54,37 +53,38 @@ void shape_infer(const ov::op::v9::RDFT* op,
const auto input_rank = input_shape.size(); const auto input_rank = input_shape.size();
if (axes_shape.rank().is_dynamic() || !axes_are_known) { if (axes_shape.rank().is_dynamic() || !axes) {
for (size_t i = 0; i < input_rank; ++i) { for (size_t i = 0; i < input_rank; ++i) {
output_shape[i] = ov::Dimension::dynamic(); output_shape[i] = ov::Dimension::dynamic();
} }
return; return output_shapes;
} }
const auto last_axis = axes.back(); const auto last_axis = axes->back();
if (input_shapes.size() == 2) { if (input_shapes.size() == 2) {
output_shape[last_axis] = get_rdft_output_dimension(input_shape[last_axis]); output_shape[last_axis] = get_rdft_output_dimension(input_shape[last_axis]);
return; return output_shapes;
} }
const auto& signal_size_shape = input_shapes[2]; const auto& signal_size_shape = input_shapes[2];
std::vector<int64_t> signal_size; auto signal_size = get_input_const_data_as<TRShape, int64_t>(op, 2, ta);
bool status_signal_size = get_data_as_int64<T>(2, op, signal_size, constant_data);
if (signal_size_shape.rank().is_dynamic() || !status_signal_size) { if (signal_size_shape.rank().is_dynamic() || !signal_size) {
output_shape[last_axis] = ov::Dimension::dynamic(); output_shape[last_axis] = ov::Dimension::dynamic();
return; return output_shapes;
} }
size_t num_of_axes = axes.size(); size_t num_of_axes = axes->size();
for (size_t i = 0; i < num_of_axes; ++i) { for (size_t i = 0; i < num_of_axes; ++i) {
const int64_t current_axis = axes[i]; const int64_t current_axis = (*axes)[i];
if (signal_size[i] != -1) { if ((*signal_size)[i] != -1) {
output_shape[current_axis] = DimType(signal_size[i]); output_shape[current_axis] = DimType((*signal_size)[i]);
} }
} }
output_shape[last_axis] = get_rdft_output_dimension(output_shape[last_axis]); output_shape[last_axis] = get_rdft_output_dimension(output_shape[last_axis]);
return output_shapes;
} }
} // namespace v9 } // namespace v9
} // namespace op } // namespace op
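
For the forward real transform, the last transformed axis shrinks to the number of non-redundant complex bins; get_rdft_output_dimension applies the bound to both ends of a dynamic dimension. A sketch of the underlying rule, assuming get_ouput_dimension_bound is floor(n/2) + 1 (its definition lives outside this hunk):

#include <cstdint>

// Hedged mirror of the RDFT last-axis rule for a static length.
int64_t rdft_last_axis(int64_t n) {
    return n / 2 + 1;  // non-redundant complex bins of a real-input FFT
}
// rdft_last_axis(64) == 33: a transformed axis of length 64 becomes 33.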

View File

@ -1,32 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/op/read_value.hpp>
#include "utils.hpp"
namespace ov {
namespace op {
template <class OpType, class ShapeType>
void read_value_shape_infer(const OpType* op,
const std::vector<ShapeType>& input_shapes,
std::vector<ShapeType>& output_shapes) {
copy_shape_infer(op, input_shapes, output_shapes);
}
namespace v3 {
template <class T>
void shape_infer(const ReadValue* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
read_value_shape_infer(op, input_shapes, output_shapes);
}
} // namespace v3
namespace v6 {
template <class T>
void shape_infer(const ReadValue* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
read_value_shape_infer(op, input_shapes, output_shapes);
}
} // namespace v6
} // namespace op
} // namespace ov

View File

@ -9,11 +9,13 @@
#include "utils.hpp" #include "utils.hpp"
template <class TShape> namespace ov {
std::vector<TShape> reduce_shape_infer(const ov::op::util::ReductionBase* op, namespace op {
bool keep_dims, template <class TShape, class TRShape = result_shape_t<TShape>>
const std::vector<TShape>& input_shapes, std::vector<TRShape> reduce_shape_infer(const util::ReductionBase* op,
const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { bool keep_dims,
const std::vector<TShape>& input_shapes,
const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
@ -21,7 +23,7 @@ std::vector<TShape> reduce_shape_infer(const ov::op::util::ReductionBase* op,
const auto& axes_shape = input_shapes[1]; const auto& axes_shape = input_shapes[1];
const auto& axes_rank = axes_shape.rank(); const auto& axes_rank = axes_shape.rank();
std::vector<TShape> output_shapes; std::vector<TRShape> output_shapes;
output_shapes.reserve(1); output_shapes.reserve(1);
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
@ -29,7 +31,7 @@ std::vector<TShape> reduce_shape_infer(const ov::op::util::ReductionBase* op,
"Axes input must be a scalar or 1D input. Got: ", "Axes input must be a scalar or 1D input. Got: ",
axes_shape); axes_shape);
const auto axes_val = ov::op::get_input_const_data_as<TShape, int64_t>(op, 1, tensor_accessor); const auto axes_val = ov::op::get_input_const_data_as<TRShape, int64_t>(op, 1, tensor_accessor);
if (data_rank.is_static() && axes_val) { if (data_rank.is_static() && axes_val) {
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
@ -38,13 +40,13 @@ std::vector<TShape> reduce_shape_infer(const ov::op::util::ReductionBase* op,
if (keep_dims) { if (keep_dims) {
output_shapes.push_back(data_shape); output_shapes.push_back(data_shape);
TShape& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
for (const auto& axis : *axes_val) { for (const auto& axis : *axes_val) {
output_shape[axis] = 1; output_shape[axis] = 1;
} }
} else { } else {
output_shapes.resize(1); output_shapes.resize(1);
TShape& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
for (size_t i = 0; i < data_shape.size(); ++i) { for (size_t i = 0; i < data_shape.size(); ++i) {
if (std::find(axes_val->begin(), axes_val->end(), i) == axes_val->end()) { if (std::find(axes_val->begin(), axes_val->end(), i) == axes_val->end()) {
output_shape.push_back(data_shape[i]); output_shape.push_back(data_shape[i]);
@ -62,35 +64,18 @@ std::vector<TShape> reduce_shape_infer(const ov::op::util::ReductionBase* op,
} }
// API: TensorAccessor to constant data // API: TensorAccessor to constant data
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ov::op::util::ArithmeticReductionKeepDims* op, std::vector<TRShape> shape_infer(const util::ArithmeticReductionKeepDims* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor);
} }
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ov::op::util::LogicalReductionKeepDims* op, std::vector<TRShape> shape_infer(const util::LogicalReductionKeepDims* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor);
} }
} // namespace op
// API for compatibility: Constant data map } // namespace ov
template <class TShape>
void shape_infer(const ov::op::util::ArithmeticReductionKeepDims* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
const auto tensor_accessor = ov::make_tensor_accessor(constant_data);
output_shapes = reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor);
}
template <class TShape>
void shape_infer(const ov::op::util::LogicalReductionKeepDims* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
const auto tensor_accessor = ov::make_tensor_accessor(constant_data);
output_shapes = reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor);
}
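
With the compatibility overloads gone, callers take the returned vector directly; keep_dims is the only difference between the two possible results. A sketch with a constant axes input (the op pointer and values are illustrative):

// 'reduce_op': pointer to e.g. ov::op::v1::ReduceSum, which derives ArithmeticReductionKeepDims.
int64_t axes[] = {1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{
    {1, {ov::element::i64, ov::Shape{1}, axes}}};

const auto input_shapes = std::vector<ov::PartialShape>{{2, 3, 4}, {1}};
const auto out = ov::op::shape_infer(reduce_op, input_shapes, ov::make_tensor_accessor(const_data));
// keep_dims == true  -> out[0] == {2, 1, 4}
// keep_dims == false -> out[0] == {2, 4}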

View File

@ -12,27 +12,28 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const RegionYolo* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const RegionYolo* op, const std::vector<T>& input_shapes) {
using DimType = typename std::iterator_traits<typename T::iterator>::value_type; using DimType = typename T::value_type;
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1) && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1));
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
const auto& input_rank = input_shape.rank(); const auto& input_rank = input_shape.rank();
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
NODE_VALIDATION_CHECK(op, input_rank.compatible(4), "Input must be a tensor of rank 4, but got ", input_rank); NODE_VALIDATION_CHECK(op, input_rank.compatible(4), "Input must be a tensor of rank 4, but got ", input_rank);
if (input_rank.is_static()) { if (input_rank.is_static()) {
int64_t end_axis = op->m_end_axis; int64_t end_axis = op->get_end_axis();
if (end_axis < 0) { if (end_axis < 0) {
end_axis += static_cast<int>(input_shape.size()); end_axis += static_cast<int>(input_shape.size());
} }
if (op->m_do_softmax) { if (op->get_do_softmax()) {
output_shape.resize(0); output_shape.resize(0);
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
auto axis = ov::normalize_axis(op, op->m_axis, input_rank); auto axis = ov::normalize_axis(op, op->get_axis(), input_rank);
OPENVINO_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
DimType flat_dim = 1; DimType flat_dim = 1;
for (int64_t i = 0; i < axis; i++) { for (int64_t i = 0; i < axis; i++) {
@ -46,15 +47,16 @@ void shape_infer(const RegionYolo* op, const std::vector<T>& input_shapes, std::
output_shape.push_back(input_shape[i]); output_shape.push_back(input_shape[i]);
} }
} else { } else {
output_shape = T({input_shape[0], output_shape = TRShape({input_shape[0],
static_cast<typename DimType::value_type>( static_cast<typename DimType::value_type>(
(op->get_num_classes() + op->get_num_coords() + 1) * op->get_mask().size()), (op->get_num_classes() + op->get_num_coords() + 1) * op->get_mask().size()),
input_shape[2], input_shape[2],
input_shape[3]}); input_shape[3]});
} }
} else { } else {
output_shape = ov::PartialShape::dynamic(ov::Rank(1, 4)); output_shape = ov::PartialShape::dynamic(ov::Rank(1, 4));
} }
return output_shapes;
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -12,10 +12,11 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class T> template <class T, class TRShape = result_shape_t<T>>
void shape_infer(const ReorgYolo* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const ReorgYolo* op, const std::vector<T>& input_shapes) {
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1) && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1));
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
const auto& strides = op->get_strides(); const auto& strides = op->get_strides();
if (input_shape.rank().is_static()) { if (input_shape.rank().is_static()) {
@ -34,7 +35,7 @@ void shape_infer(const ReorgYolo* op, const std::vector<T>& input_shapes, std::v
static_cast<size_t>(input_shape[1].get_length()) >= (strides[0] * strides[0]), static_cast<size_t>(input_shape[1].get_length()) >= (strides[0] * strides[0]),
"For [N, C, H, W] input shape, C >= (stride*stride) is required."); "For [N, C, H, W] input shape, C >= (stride*stride) is required.");
output_shape = T({input_shape[0], input_shape[1]}); output_shape = TRShape({input_shape[0], input_shape[1]});
for (size_t i = 2; i < input_shape.size(); i++) { for (size_t i = 2; i < input_shape.size(); i++) {
if (input_shape[i].is_static()) if (input_shape[i].is_static())
@ -53,6 +54,7 @@ void shape_infer(const ReorgYolo* op, const std::vector<T>& input_shapes, std::v
} else { } else {
output_shape = ov::PartialShape::dynamic(input_shape.rank()); output_shape = ov::PartialShape::dynamic(input_shape.rank());
} }
return output_shapes;
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -6,11 +6,13 @@
#include <openvino/core/validation_util.hpp> #include <openvino/core/validation_util.hpp>
#include <openvino/op/reverse_sequence.hpp> #include <openvino/op/reverse_sequence.hpp>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ReverseSequence* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const ReverseSequence* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
using DimType = typename TShape::value_type; using DimType = typename TShape::value_type;
@ -28,7 +30,7 @@ std::vector<TShape> shape_infer(const ReverseSequence* op, const std::vector<TSh
seq_lengths_rank.compatible(1), seq_lengths_rank.compatible(1),
"Sequence lengths rank must be equal to 1. Got: ", "Sequence lengths rank must be equal to 1. Got: ",
seq_lengths_pshape); seq_lengths_pshape);
auto output_pshape = data_pshape; TRShape output_pshape = data_pshape;
if (data_rank.is_static() && seq_lengths_rank.is_static()) { if (data_rank.is_static() && seq_lengths_rank.is_static()) {
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
const auto normalized_batch_axis = ov::normalize_axis(op, op->get_origin_batch_axis(), data_rank); const auto normalized_batch_axis = ov::normalize_axis(op, op->get_origin_batch_axis(), data_rank);
@ -51,13 +53,6 @@ std::vector<TShape> shape_infer(const ReverseSequence* op, const std::vector<TSh
return {output_pshape}; return {output_pshape};
} }
template <class TShape>
void shape_infer(const ReverseSequence* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -18,7 +18,7 @@ struct ClipNegative {
template <class T> template <class T>
constexpr value_type operator()(const T value) const { constexpr value_type operator()(const T value) const {
return (std::is_signed<T>::value && value < 0) ? 0 : static_cast<value_type>(value); return ov::cmp::lt(value, 0) ? 0 : static_cast<value_type>(value);
} }
}; };
} // namespace util } // namespace util
@ -36,10 +36,10 @@ namespace v1 {
* *
* \return Vector of output shapes with one shape. * \return Vector of output shapes with one shape.
*/ */
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Reverse* op, std::vector<TRShape> shape_infer(const Reverse* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
@ -82,24 +82,6 @@ std::vector<TShape> shape_infer(const Reverse* op,
return {data_shape}; return {data_shape};
} }
/**
* \brief Reverse shape inference
*
* \tparam TShape Type of shape.
*
* \param op Pointer to Reverse operator.
* \param input_shapes Input shapes of Reverse.
* \param output_shapes Output shapes of Reverse
* \param constant_data Map of constant data. Default empty.
*/
template <class TShape>
void shape_infer(const Reverse* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, std::reference_wrapper<const ov::Tensor>>& constant_data = {}) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v1 } // namespace v1
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -29,18 +29,15 @@ void validate_inputs_rank(const op::util::RNNCellBase* op,
// Output shape layout: // Output shape layout:
// output_shapes[0...num_state_nodes]: [batch_size, hidden_size] // Rank always 2 // output_shapes[0...num_state_nodes]: [batch_size, hidden_size] // Rank always 2
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> cell_base_shape_infer(const op::util::RNNCellBase* op, std::vector<TRShape> cell_base_shape_infer(const op::util::RNNCellBase* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
size_t num_gates, size_t num_gates,
size_t num_state_nodes, size_t num_state_nodes,
bool linear_before_reset = false) { bool linear_before_reset = false) {
const auto num_inputs = 4 + num_state_nodes; const auto num_inputs = 4 + num_state_nodes;
NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided."); NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided.");
std::vector<TShape> output_shapes;
output_shapes.reserve(num_state_nodes);
std::vector<Rank> expected_in_ranks; std::vector<Rank> expected_in_ranks;
expected_in_ranks.reserve(num_inputs); expected_in_ranks.reserve(num_inputs);
expected_in_ranks.insert(expected_in_ranks.end(), 1 + num_state_nodes, Rank(2)); expected_in_ranks.insert(expected_in_ranks.end(), 1 + num_state_nodes, Rank(2));
@ -128,23 +125,23 @@ std::vector<TShape> cell_base_shape_infer(const op::util::RNNCellBase* op,
} }
} }
return {num_state_nodes, TShape{merged_batch_size, merged_hidden_size}}; return {num_state_nodes, TRShape{merged_batch_size, merged_hidden_size}};
} }
// Output shapes layout: // Output shapes layout:
// output_shapes[0]: [batch_size, num_directions, seq_length, hidden_size] // Rank always 4 // output_shapes[0]: [batch_size, num_directions, seq_length, hidden_size] // Rank always 4
// output_shapes[1... num_state_nodes]: [batch_size, num_directions, hidden_size] // Rank always 3 // output_shapes[1... num_state_nodes]: [batch_size, num_directions, hidden_size] // Rank always 3
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> seq_base_shape_infer(const op::util::RNNCellBase* op, std::vector<TRShape> seq_base_shape_infer(const op::util::RNNCellBase* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
size_t num_gates, size_t num_gates,
size_t num_state_nodes, size_t num_state_nodes,
op::RecurrentSequenceDirection direction, op::RecurrentSequenceDirection direction,
bool linear_before_reset = false) { bool linear_before_reset = false) {
const auto num_inputs = 5 + num_state_nodes; const auto num_inputs = 5 + num_state_nodes;
NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided."); NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided.");
std::vector<TShape> output_shapes; std::vector<TRShape> output_shapes;
output_shapes.reserve(1 + num_state_nodes); output_shapes.reserve(1 + num_state_nodes);
std::vector<Rank> expected_in_ranks; std::vector<Rank> expected_in_ranks;
@ -265,14 +262,14 @@ std::vector<TShape> seq_base_shape_infer(const op::util::RNNCellBase* op,
} }
// Y output // Y output
output_shapes.push_back(TShape{merged_batch_size, output_shapes.push_back(TRShape{merged_batch_size,
merged_num_directions, merged_num_directions,
x_pshape.rank().is_static() ? x_pshape[1] : DimType(), x_pshape.rank().is_static() ? x_pshape[1] : DimType(),
merged_hidden_size}); merged_hidden_size});
// Ho, Co outputs // Ho, Co outputs
output_shapes.insert(output_shapes.end(), output_shapes.insert(output_shapes.end(),
num_state_nodes, num_state_nodes,
TShape{merged_batch_size, merged_num_directions, merged_hidden_size}); TRShape{merged_batch_size, merged_num_directions, merged_hidden_size});
return output_shapes; return output_shapes;
} }
} // namespace rnn } // namespace rnn
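
The shared sequence helper fixes the output layout for every recurrent sequence op, so it is worth spelling out once. For batch 4, one direction, sequence length 10, hidden size 128, and two state nodes (the LSTM case; numbers illustrative):

// Shapes returned by rnn::seq_base_shape_infer in that configuration:
const auto expected = std::vector<ov::PartialShape>{
    {4, 1, 10, 128},  // Y  : [batch, num_directions, seq_length, hidden_size]
    {4, 1, 128},      // Ho : [batch, num_directions, hidden_size]
    {4, 1, 128},      // Co : [batch, num_directions, hidden_size], only when num_state_nodes == 2
};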

View File

@ -9,15 +9,11 @@ namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape>
std::vector<TShape> shape_infer(const RNNCell* op, const std::vector<TShape>& input_shapes) { std::vector<result_shape_t<TShape>> shape_infer(const RNNCell* op, const std::vector<TShape>& input_shapes) {
constexpr auto num_gates = 1; constexpr auto num_gates = 1;
constexpr auto num_state_nodes = 1; constexpr auto num_state_nodes = 1;
return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes);
} }
template <class TShape>
void shape_infer(const RNNCell* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -9,7 +9,7 @@ namespace ov {
namespace op { namespace op {
namespace v5 { namespace v5 {
template <class TShape> template <class TShape>
std::vector<TShape> shape_infer(const RNNSequence* op, const std::vector<TShape>& input_shapes) { std::vector<result_shape_t<TShape>> shape_infer(const RNNSequence* op, const std::vector<TShape>& input_shapes) {
constexpr auto num_gates = 1; constexpr auto num_gates = 1;
constexpr auto num_state_nodes = 1; constexpr auto num_state_nodes = 1;
return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction()); return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction());

View File

@ -48,8 +48,8 @@ inline void batch_indicies_et(const Node* const op) {
} }
} // namespace validate } // namespace validate
template <class OpType, class TShape> template <class OpType, class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const OpType* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const OpType* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
using TDim = typename TShape::value_type; using TDim = typename TShape::value_type;
@ -62,7 +62,7 @@ std::vector<TShape> shape_infer(const OpType* op, const std::vector<TShape>& inp
const auto input_ps_rank = input_ps.rank(); const auto input_ps_rank = input_ps.rank();
const auto batch_indices_ps_rank = batch_indices_ps.rank(); const auto batch_indices_ps_rank = batch_indices_ps.rank();
auto output_shapes = std::vector<TShape>(1); auto output_shapes = std::vector<TRShape>(1);
auto& out_shape = output_shapes.front(); auto& out_shape = output_shapes.front();
out_shape.reserve(4); out_shape.reserve(4);
@ -103,27 +103,17 @@ std::vector<TShape> shape_infer(const OpType* op, const std::vector<TShape>& inp
} // namespace roi_align } // namespace roi_align
namespace v3 { namespace v3 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes) {
return roi_align::shape_infer(op, input_shapes); return roi_align::shape_infer(op, input_shapes);
} }
template <class TShape>
void shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v3 } // namespace v3
namespace v9 { namespace v9 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes) {
return roi_align::shape_infer(op, input_shapes); return roi_align::shape_infer(op, input_shapes);
} }
template <class TShape>
void shape_infer(const ROIAlign* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v9 } // namespace v9
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -9,6 +9,7 @@
#include "compare.hpp" #include "compare.hpp"
#include "dimension_util.hpp" #include "dimension_util.hpp"
#include "openvino/op/roi_pooling.hpp" #include "openvino/op/roi_pooling.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
@ -77,8 +78,8 @@ void method_attr(const TROIPooling* op) {
} // namespace roi_pooling } // namespace roi_pooling
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ROIPooling* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const ROIPooling* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
using namespace ov::util; using namespace ov::util;
@ -92,19 +93,15 @@ std::vector<TShape> shape_infer(const ROIPooling* op, const std::vector<TShape>&
roi_pooling::validate::scale_attr(op); roi_pooling::validate::scale_attr(op);
roi_pooling::validate::method_attr(op); roi_pooling::validate::method_attr(op);
TShape out_shape; auto output_shapes = std::vector<TRShape>(1);
auto& out_shape = output_shapes.front();
out_shape.reserve(4); out_shape.reserve(4);
out_shape.emplace_back(rois_shape.rank().is_static() ? rois_shape[0] : dim::inf_bound); out_shape.emplace_back(rois_shape.rank().is_static() ? rois_shape[0] : dim::inf_bound);
out_shape.emplace_back(feat_rank.is_static() ? feat_shape[1] : dim::inf_bound); out_shape.emplace_back(feat_rank.is_static() ? feat_shape[1] : dim::inf_bound);
std::copy(op->get_output_roi().cbegin(), op->get_output_roi().cend(), std::back_inserter(out_shape)); std::copy(op->get_output_roi().cbegin(), op->get_output_roi().cend(), std::back_inserter(out_shape));
return {out_shape}; return output_shapes;
}
template <class TShape>
void shape_infer(const ROIPooling* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
} }
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
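
The rewritten body assembles {num_rois, channels, output_roi...} directly in output_shapes.front() instead of returning a local copy. For a {1, 256, 14, 14} feature map, 300 ROIs, and a 6x6 output ROI (illustrative numbers; 'roi_pooling_op' is a hypothetical const ov::op::v0::ROIPooling*):

const auto input_shapes = std::vector<ov::PartialShape>{{1, 256, 14, 14}, {300, 5}};
const auto out = ov::op::v0::shape_infer(roi_pooling_op, input_shapes);
// out[0] == {300, 256, 6, 6}:
//   rois[0] -> 300, feat[1] -> 256, then the two get_output_roi() extents.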

View File

@ -12,10 +12,10 @@ namespace ov {
namespace op { namespace op {
namespace v7 { namespace v7 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const Roll* op, std::vector<TRShape> shape_infer(const Roll* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const auto& data_pshape = input_shapes[0]; const auto& data_pshape = input_shapes[0];
@ -39,7 +39,7 @@ std::vector<TShape> shape_infer(const Roll* op,
"Axes must be a scalar or 1D tensor."); "Axes must be a scalar or 1D tensor.");
if (data_pshape.rank().is_static()) { if (data_pshape.rank().is_static()) {
if (const auto& axes = get_input_const_data_as<TShape, int64_t>(op, 2, constant_data)) { if (const auto axes = get_input_const_data_as<TRShape, int64_t>(op, 2, ta)) {
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
ov::normalize_axes(op, data_pshape.size(), *axes); ov::normalize_axes(op, data_pshape.size(), *axes);
OPENVINO_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
@ -48,15 +48,6 @@ std::vector<TShape> shape_infer(const Roll* op,
return {data_pshape}; return {data_pshape};
} }
template <class TShape>
void shape_infer(const Roll* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = shape_infer<TShape>(op, input_shapes, constant_data);
}
} // namespace v7 } // namespace v7
} // namespace op } // namespace op
} // namespace ov } // namespace ov
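
Roll stays shape-preserving; the accessor is consulted only to normalize, and thereby range-check, constant axes against the data rank. Sketch (pointer and axes values illustrative):

// 'roll_op': const ov::op::v7::Roll* (illustrative).
int64_t axes[] = {0, -1};
const auto const_data = std::unordered_map<size_t, ov::Tensor>{
    {2, {ov::element::i64, ov::Shape{2}, axes}}};

// Inputs: data, shift, axes.
const auto input_shapes = std::vector<ov::PartialShape>{{3, 5, 7}, {2}, {2}};
const auto out = ov::op::v7::shape_infer(roll_op, input_shapes, ov::make_tensor_accessor(const_data));
// out[0] == {3, 5, 7}; axes 0 and -1 were validated against rank 3.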

View File

@ -9,13 +9,12 @@
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace util {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const util::ScatterElementsUpdateBase* op, std::vector<TRShape> shape_infer(const util::ScatterElementsUpdateBase* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) { const ITensorAccessor& ta = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
const auto& data_shape = input_shapes[0]; const auto& data_shape = input_shapes[0];
@ -51,7 +50,7 @@ std::vector<TShape> shape_infer(const util::ScatterElementsUpdateBase* op,
updates_shape); updates_shape);
if (data_shape.rank().is_static()) { if (data_shape.rank().is_static()) {
if (const auto axis_input = get_input_const_data_as<TShape, int64_t>(op, 3, constant_data)) { if (const auto axis_input = get_input_const_data_as<TShape, int64_t>(op, 3, ta)) {
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
ov::normalize_axis(op, (*axis_input)[0], data_rank); ov::normalize_axis(op, (*axis_input)[0], data_rank);
OPENVINO_SUPPRESS_DEPRECATED_END OPENVINO_SUPPRESS_DEPRECATED_END
@ -59,23 +58,13 @@ std::vector<TShape> shape_infer(const util::ScatterElementsUpdateBase* op,
} }
return {data_shape}; return {data_shape};
} }
} // namespace util
namespace v3 {
template <class TShape>
void shape_infer(const ScatterElementsUpdate* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = util::shape_infer(op, input_shapes, constant_data);
}
} // namespace v3
namespace v12 { namespace v12 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const ScatterElementsUpdate* op, std::vector<TRShape> shape_infer(const ScatterElementsUpdate* op,
const std::vector<TShape>& input_shapes, const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes, const ITensorAccessor& ta = make_tensor_accessor()) {
const std::map<size_t, HostTensorPtr>& constant_data = {}) { return ov::op::shape_infer(op, input_shapes, ta);
output_shapes = util::shape_infer(op, input_shapes, constant_data);
} }
} // namespace v12 } // namespace v12
} // namespace op } // namespace op

View File

@ -11,8 +11,8 @@
namespace ov { namespace ov {
namespace op { namespace op {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const util::ScatterNDBase* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const util::ScatterNDBase* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const auto& inputs_shape = input_shapes[util::ScatterNDBase::INPUTS]; const auto& inputs_shape = input_shapes[util::ScatterNDBase::INPUTS];
const auto& indices_shape = input_shapes[util::ScatterNDBase::INDICES]; const auto& indices_shape = input_shapes[util::ScatterNDBase::INDICES];
@ -65,12 +65,5 @@ std::vector<TShape> shape_infer(const util::ScatterNDBase* op, const std::vector
return {inputs_shape}; return {inputs_shape};
} }
template <class TShape>
void shape_infer(const util::ScatterNDBase* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -6,41 +6,46 @@
#include <openvino/op/select.hpp> #include <openvino/op/select.hpp>
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v1 { namespace v1 {
template <class T> template <class TShape, class TRShape = result_shape_t<TShape>>
void shape_infer(const Select* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) { std::vector<TRShape> shape_infer(const Select* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 && output_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 3);
const auto& broadcast_spec = op->get_auto_broadcast(); const auto& broadcast_spec = op->get_auto_broadcast();
auto& result_shape = output_shapes[0]; TRShape result_shape;
if (broadcast_spec.m_type == op::AutoBroadcastType::PDPD) { if (broadcast_spec.m_type == op::AutoBroadcastType::PDPD) {
result_shape = input_shapes[1]; // 'then' tensor result_shape = input_shapes[1]; // 'then' tensor
// In PDPD broadcasting, 'else' is merged into 'then' one way, not both ways. // In PDPD broadcasting, 'else' is merged into 'then' one way, not both ways.
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
T::broadcast_merge_into(result_shape, input_shapes[2], broadcast_spec), TRShape::broadcast_merge_into(result_shape, input_shapes[2], broadcast_spec),
"'Else' tensor shape is not broadcastable."); "'Else' tensor shape is not broadcastable.");
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
T::broadcast_merge_into(result_shape, input_shapes[0], broadcast_spec), TRShape::broadcast_merge_into(result_shape, input_shapes[0], broadcast_spec),
"'Cond' tensor shape is not broadcastable."); "'Cond' tensor shape is not broadcastable.");
} else { } else {
result_shape = input_shapes[2]; result_shape = input_shapes[2];
for (int input_port = 1; input_port >= 0; input_port--) { for (int input_port = 1; input_port >= 0; input_port--) {
if (broadcast_spec.m_type == op::AutoBroadcastType::NONE) { if (broadcast_spec.m_type == op::AutoBroadcastType::NONE) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
T::merge_into(result_shape, input_shapes[input_port]), TRShape::merge_into(result_shape, input_shapes[input_port]),
"Argument shapes are inconsistent."); "Argument shapes are inconsistent.");
} else if (broadcast_spec.m_type == op::AutoBroadcastType::NUMPY) { } else if (broadcast_spec.m_type == op::AutoBroadcastType::NUMPY) {
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(
T::broadcast_merge_into(result_shape, input_shapes[input_port], broadcast_spec), op,
"Argument shapes are inconsistent."); TRShape::broadcast_merge_into(result_shape, input_shapes[input_port], broadcast_spec),
"Argument shapes are inconsistent.");
} else { } else {
NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification");
} }
} }
} }
return {result_shape};
} }
} // namespace v1 } // namespace v1
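
The NUMPY branch now goes through TRShape::broadcast_merge_into, which PartialShape exposes as a static member, so the merging can be reproduced without a Select node. A self-contained illustration:

ov::PartialShape result{2, 3, 4};  // 'else' shape, the merge seed
bool ok = ov::PartialShape::broadcast_merge_into(result,
                                                 ov::PartialShape{3, 1},  // 'then'
                                                 ov::op::AutoBroadcastType::NUMPY);
ok = ok && ov::PartialShape::broadcast_merge_into(result,
                                                  ov::PartialShape{1},    // 'cond'
                                                  ov::op::AutoBroadcastType::NUMPY);
// ok == true and result == {2, 3, 4}.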

View File

@ -10,34 +10,33 @@
#include "utils.hpp" #include "utils.hpp"
template <class T> template <class T, class TRShape = ov::result_shape_t<T>>
void shape_infer(const ov::opset1::Reshape* op, std::vector<TRShape> shape_infer(const ov::op::v1::Reshape* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,
std::vector<T>& output_shapes, const ov::ITensorAccessor& ta = ov::make_tensor_accessor()) {
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); auto output_pattern = ov::op::get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
std::vector<int64_t> output_pattern; NODE_VALIDATION_CHECK(op, output_pattern, "Shape inference lacks input data");
bool status = get_data_as_int64<T>(1, op, output_pattern, constant_data);
NODE_VALIDATION_CHECK(op, status, "Shape inference lacks input data");
auto& input_shape = input_shapes[0]; auto& input_shape = input_shapes[0];
OPENVINO_ASSERT(input_shape.is_static()); OPENVINO_ASSERT(input_shape.is_static());
auto output_shapes = std::vector<TRShape>(1);
auto& output_shape = output_shapes[0]; auto& output_shape = output_shapes[0];
output_shape.resize(output_pattern.size()); output_shape.resize(output_pattern->size());
auto output_rank = input_shapes[1].size() == 0 ? 0 : input_shapes[1][0]; auto output_rank = input_shapes[1].size() == 0 ? 0 : input_shapes[1][0];
if (output_rank == 0 && output_shape.size() != 0) { if (output_rank == 0 && output_shape.size() != 0) {
output_pattern.clear(); output_pattern->clear();
OPENVINO_ASSERT(output_pattern.size() == 1); OPENVINO_ASSERT(output_pattern->size() == 1);
NODE_VALIDATION_CHECK(op, output_pattern[0] == 1, "The value of scalar shape pattern should be equal to 1!"); NODE_VALIDATION_CHECK(op, (*output_pattern)[0] == 1, "The value of scalar shape pattern should be equal to 1!");
} }
auto special_zero = op->get_special_zero(); auto special_zero = op->get_special_zero();
size_t output_product(1); size_t output_product(1);
int64_t minus_one_idx = -1; int64_t minus_one_idx = -1;
for (size_t i = 0; i < output_pattern.size(); ++i) { for (size_t i = 0; i < output_pattern->size(); ++i) {
if (output_pattern[i] == -1) { // resolving everything except -1 if ((*output_pattern)[i] == -1) { // resolving everything except -1
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
minus_one_idx == -1, minus_one_idx == -1,
"More than one element of output shape pattern has value of -1"); "More than one element of output shape pattern has value of -1");
@ -45,7 +44,7 @@ void shape_infer(const ov::opset1::Reshape* op,
         continue;
     }
-    auto pattern_dim = output_pattern[i];
+    auto pattern_dim = (*output_pattern)[i];
     if (pattern_dim == 0 && special_zero) {
         NODE_VALIDATION_CHECK(op, i < input_shape.size(), "'0' dimension is out of range");
         output_shape[i] = input_shape[i];
@@ -59,7 +58,7 @@ void shape_infer(const ov::opset1::Reshape* op,
     }
     size_t input_product(1);
     for (size_t i = 0; i < input_shape.size(); ++i) {
-        if (i < output_pattern.size() && output_pattern[i] == 0 && special_zero)
+        if (i < output_pattern->size() && (*output_pattern)[i] == 0 && special_zero)
             continue;
         input_product = input_shape[i].get_length() * input_product;
     }
@@ -81,7 +80,7 @@ void shape_infer(const ov::opset1::Reshape* op,
         }
     }
-    size_t zero_dims = std::count_if(output_pattern.begin(), output_pattern.end(), [](const int64_t& dim) {
+    size_t zero_dims = std::count_if(output_pattern->begin(), output_pattern->end(), [](const int64_t& dim) {
         return dim == 0;
     });
@@ -94,41 +93,44 @@ void shape_infer(const ov::opset1::Reshape* op,
                           output_shape,
                           " is incompatible with input shape ",
                           input_shape);
+    return output_shapes;
 }

-template <class T>
-inline void dynamic_shape(T& output_shape) {
-    OPENVINO_THROW("This code should be executed only for PartialShape class");
-}
-
-template <>
-inline void dynamic_shape<ov::PartialShape>(ov::PartialShape& output_shape) {
-    output_shape = ov::PartialShape::dynamic();
-}
-
-template <class T>
-void shape_of_shape_infer(const T& input_shape, T& output_shape) {
-    if (input_shape.rank().is_static()) {
-        const auto& rank = input_shape.size();
-        if (rank) {
-            output_shape.resize(1);
-            output_shape[0] = rank;
-        } else {
-            output_shape.clear();
-        }
-    } else {
-        dynamic_shape(output_shape);
-    }
-}
-
-template <class T>
-void shape_infer(const ov::opset1::ShapeOf* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
-    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1);
-    shape_of_shape_infer(input_shapes[0], output_shapes[0]);
-}
-
-template <class T>
-void shape_infer(const ov::opset3::ShapeOf* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
-    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1);
-    shape_of_shape_infer(input_shapes[0], output_shapes[0]);
-}
+namespace ov {
+namespace op {
+namespace shape_of {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const Node* op, std::vector<TShape> input_shapes) {
+    NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
+    const auto& input_shape = input_shapes[0];
+    const auto& input_rank = input_shape.rank();
+
+    auto output_shapes = std::vector<TRShape>(1);
+
+    if (input_rank.is_static()) {
+        if (input_shape.size()) {
+            output_shapes[0].emplace_back(input_shape.size());
+        }
+    } else {
+        output_shapes[0] = PartialShape::dynamic();
+    }
+    return output_shapes;
+}
+}  // namespace shape_of
+
+namespace v0 {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const ShapeOf* op, const std::vector<TShape>& input_shapes) {
+    return shape_of::shape_infer(op, input_shapes);
+}
+}  // namespace v0
+
+namespace v3 {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const ShapeOf* op, const std::vector<TShape>& input_shapes) {
+    return shape_of::shape_infer(op, input_shapes);
+}
+}  // namespace v3
+}  // namespace op
+}  // namespace ov

View File

@ -7,13 +7,14 @@
#include <openvino/op/shuffle_channels.hpp> #include <openvino/op/shuffle_channels.hpp>
#include "openvino/core/validation_util.hpp" #include "openvino/core/validation_util.hpp"
#include "utils.hpp"
namespace ov { namespace ov {
namespace op { namespace op {
namespace v0 { namespace v0 {
template <class TShape> template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TShape> shape_infer(const ShuffleChannels* op, const std::vector<TShape>& input_shapes) { std::vector<TRShape> shape_infer(const ShuffleChannels* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& group = op->get_group(); const auto& group = op->get_group();
@ -22,7 +23,7 @@ std::vector<TShape> shape_infer(const ShuffleChannels* op, const std::vector<TSh
const auto& input_shape = input_shapes[0]; const auto& input_shape = input_shapes[0];
const auto input_shape_rank = input_shape.rank(); const auto input_shape_rank = input_shape.rank();
auto output_shapes = std::vector<TShape>(1, input_shape); auto output_shapes = std::vector<TRShape>(1, input_shape);
if (input_shape_rank.is_static()) { if (input_shape_rank.is_static()) {
NODE_VALIDATION_CHECK(op, input_shape.size() >= 1, "The input tensor's shape is expected to be at least 1D."); NODE_VALIDATION_CHECK(op, input_shape.size() >= 1, "The input tensor's shape is expected to be at least 1D.");
@ -43,14 +44,6 @@ std::vector<TShape> shape_infer(const ShuffleChannels* op, const std::vector<TSh
return output_shapes; return output_shapes;
} }
template <class TShape>
void shape_infer(const ShuffleChannels* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v0 } // namespace v0
} // namespace op } // namespace op
} // namespace ov } // namespace ov
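
The removed void overload implies a mechanical call-site migration; a sketch, assuming op and input_shapes are already defined:

    // Before (removed overload): caller pre-allocates the output vector.
    // std::vector<ov::PartialShape> output_shapes(1);
    // shape_infer(op.get(), input_shapes, output_shapes);
    // After: the single remaining overload returns the shapes.
    const auto output_shapes = ov::op::v0::shape_infer(op.get(), input_shapes);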

View File

@@ -41,12 +41,11 @@ struct AxesMap {

 namespace v8 {

-template <class T>
-void shape_infer(const Slice* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const Slice* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
+    using DimType = typename T::value_type;

     const auto& num_of_inputs = input_shapes.size();
@@ -54,7 +53,6 @@ void shape_infer(const Slice* op,
                           num_of_inputs == 4 || num_of_inputs == 5,
                           "Slice has to have 4 or 5 inputs. Got: ",
                           num_of_inputs);
-    NODE_VALIDATION_CHECK(op, output_shapes.size() == 1);

     const auto& input_shape = input_shapes[0];
     const auto& input_rank = input_shape.rank();
@@ -88,17 +86,18 @@ void shape_infer(const Slice* op,
         start_shape.compatible(stop_shape) && start_shape.compatible(step_shape) && stop_shape.compatible(step_shape),
         "Slice `start`, `stop`, `step` inputs must have compatible shapes.");

+    auto output_shapes = std::vector<TRShape>(1);
     // it is not possible to define output shape if input data shape rank is undefined
     // even the lengths of begin, end, or strides are defined
     if (input_rank.is_dynamic()) {
         output_shapes[0] = PartialShape::dynamic();
-        return;
+        return output_shapes;
     }

     // compute constant values of begin, end, and strides if possible
-    const auto start = slice::get_input_bounds<T>(op, 1, constant_data);
-    const auto stop = slice::get_input_bounds<T>(op, 2, constant_data);
-    const auto steps = get_input_const_data_as<T, int64_t>(op, 3, constant_data);
+    const auto start = get_input_bounds<TRShape, int64_t>(op, 1, ta);
+    const auto stop = get_input_bounds<TRShape, int64_t>(op, 2, ta);
+    const auto steps = get_input_const_data_as<TRShape, int64_t>(op, 3, ta);

     slice::AxesMap axes_map;
     if (input_shapes.size() > 4) {
@@ -106,7 +105,7 @@ void shape_infer(const Slice* op,
                               input_shapes[4].compatible(start_shape),
                               "Slice `axes` input must have compatible shape with `start`, `stop`, `step` inputs.");

-        if (auto axes = get_input_const_data_as<T, int64_t>(op, 4, constant_data)) {
+        if (auto axes = get_input_const_data_as<TRShape, int64_t>(op, 4, ta)) {
             OPENVINO_SUPPRESS_DEPRECATED_START
             ov::normalize_axes(op, input_shape.rank().get_length(), *axes);
             OPENVINO_SUPPRESS_DEPRECATED_END
@@ -150,6 +149,7 @@ void shape_infer(const Slice* op,
             out.emplace_back(0, input_dim.get_max_length());
         }
     }
+    return output_shapes;
 }

 }  // namespace v8
 }  // namespace op
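
Call-site sketch for the accessor-based interface (illustrative; the no-argument make_tensor_accessor() default comes from the signature above, while an overload accepting a tensor container keyed by input index is an assumption based on its use elsewhere in this PR):

    // start/stop/step values resolve from Constant inputs via the default accessor.
    const auto shapes = ov::op::v8::shape_infer(slice_op.get(), input_shapes);
    // With runtime tensors (assumed overload):
    // const auto shapes = ov::op::v8::shape_infer(slice_op.get(), input_shapes, ov::make_tensor_accessor(tensor_map));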

View File

@@ -11,107 +11,6 @@
 #include "validation_util.hpp"

 namespace ov {
-namespace internal {
-/**
- * \brief Check if value of type T has got maximum value of type U.
- *
- * \tparam T Input value type.
- * \tparam U Type to get its maximum for comparison. Default same as T.
- *
- * \param value Input value.
- *
- * \return True if input value has got maximum value of type U, otherwise false.
- */
-template <class T, class U = T>
-constexpr bool is_max(const T& value) {
-    return std::numeric_limits<U>::max() == value;
-}
-
-/**
- * \brief Check if value of type T has got minimum value of type U.
- *
- * \tparam T Input value type.
- * \tparam U Type to get its minimum for comparison. Default same as T.
- *
- * \param value Input value.
- *
- * \return True if input value has got minimum value of type U, otherwise false.
- */
-template <class T, class U = T>
-constexpr bool is_min(const T& value) {
-    return std::numeric_limits<U>::min() == value;
-}
-}  // namespace internal
-
-namespace element {
-/**
- * \brief Check if value has got maximum value of ov::element::Type_t.
- *
- * \tparam T Input value type.
- *
- * \param type  ov::element type to get its maximum.
- * \param value Input value for check.
- *
- * \return True if input value has got maximum number specified by ov::element type, otherwise false.
- */
-template <class T>
-bool is_max_of(const element::Type_t& type, const T& value) {
-    switch (type) {
-    case element::i32:
-        return internal::is_max<T, typename element_type_traits<element::i32>::value_type>(value);
-    case element::i64:
-        return internal::is_max<T, typename element_type_traits<element::i64>::value_type>(value);
-    default:
-        return false;
-    }
-}
-
-/**
- * \brief Check if value has got minimum value of ov::element::Type_t.
- *
- * \tparam T Input value type.
- *
- * \param type  ov::element type to get its minimum.
- * \param value Input value for check.
- *
- * \return True if input value has got minimum number specified by ov::element type, otherwise false.
- */
-template <class T>
-bool is_min_of(const element::Type_t type, const T& value) {
-    switch (type) {
-    case element::i32:
-        return internal::is_min<T, typename element_type_traits<element::i32>::value_type>(value);
-    case element::i64:
-        return internal::is_min<T, typename element_type_traits<element::i64>::value_type>(value);
-    default:
-        return false;
-    }
-}
-
-/**
- * \brief Checks input value for element type maximum or minimum and returns limit or value.
- *
- * \tparam T Type of input value.
- * \tparam U Type of return value. Default same as T.
- *
- * \param type  Type of ov::element::Type_t.
- * \param value Input value for check.
- *
- * \return If value is maximum or minimum, the limit of U, otherwise value as U.
- */
-template <class T, class U = T>
-U get_value_or_limit_of(const element::Type_t& type, const T& value) {
-    if (is_min_of(type, value)) {
-        return std::numeric_limits<U>::min();
-    } else if (is_max_of(type, value)) {
-        return std::numeric_limits<U>::max();
-    } else {
-        return static_cast<U>(value);
-    }
-}
-}  // namespace element
-
 namespace op {

 namespace slice {
@@ -135,14 +34,14 @@ inline int64_t get_sliced_value(const int64_t dim, const int64_t start, const in
     constexpr int64_t min_bound = 0;

     const auto& norm_dim = dim::is_inf_bound(dim) ? std::numeric_limits<int64_t>::max() : dim;
-    const auto is_norm_dim_max = ov::internal::is_max(norm_dim);
+    const auto is_norm_dim_max = ov::util::is_max(norm_dim);

     const auto is_start_lt_min_bound = start < min_bound;
     const auto are_bounds_diff_sign = is_start_lt_min_bound != (stop < 0);

-    const auto is_start_max = ov::internal::is_max(start);
-    const auto is_start_limit = is_start_max || ov::internal::is_min(start);
-    const auto is_stop_max = ov::internal::is_max(stop);
+    const auto is_start_max = ov::util::is_max(start);
+    const auto is_start_limit = is_start_max || ov::util::is_min(start);
+    const auto is_stop_max = ov::util::is_max(stop);
     const auto any_bound_max = is_start_max || is_stop_max;

     // Prepare bounds for sliced value calculation.
     int64_t lb, ub;
@@ -187,21 +86,6 @@ inline int64_t get_sliced_value(const int64_t dim, const int64_t start, const in
     }
 }

-// To get element type from constant or tensor.
-inline element::Type get_input_const_element_type(const ov::Node* op,
-                                                  size_t idx,
-                                                  const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    if (constant_data.count(idx)) {
-        return constant_data.at(idx)->get_element_type();
-        OPENVINO_SUPPRESS_DEPRECATED_START
-    } else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) {
-        OPENVINO_SUPPRESS_DEPRECATED_END
-        return constant->get_element_type();
-    } else {
-        return element::undefined;
-    }
-}
-
 using Bounds = std::pair<int64_t, int64_t>;  //!< Alias to dimension bounds for slice.

 /**
@@ -243,58 +127,6 @@ constexpr bool is_ub_within_dim(const int64_t ub, const TDim& dim) {
            cmp::lt(ub, dim.get_max_length());
 }

-/**
- * \brief Get the input bounds from constant input (constant map) or evaluate bounds,
- *  and return them as a vector of pairs (lower, upper).
- *
- * \tparam TShape Shape type.
- *
- * \param op            Operator pointer.
- * \param idx           Input index.
- * \param constant_data Map with constant data.
- *
- * \return Vector of slice::Bounds.
- */
-template <class TShape, class TResult = std::vector<Bounds>>
-std::unique_ptr<TResult> get_input_bounds(const ov::Node* op,
-                                          size_t idx,
-                                          const std::map<size_t, HostTensorPtr>& constant_data) {
-    // Helper to create TResult from lowers and uppers.
-    const auto make_bounds_vec =
-        [](const element::Type& et, const std::vector<int64_t>& lowers, const std::vector<int64_t>& uppers) {
-            TResult out;
-            out.reserve(lowers.size());
-            std::transform(lowers.begin(),
-                           lowers.end(),
-                           uppers.begin(),
-                           std::back_inserter(out),
-                           [&et](int64_t lb, int64_t ub) {
-                               return std::make_pair(element::get_value_or_limit_of(et, lb),
-                                                     element::get_value_or_limit_of(et, ub));
-                           });
-            return out;
-        };
-
-    std::unique_ptr<TResult> out;
-    if (auto lowers = op::get_input_const_data_as<TShape, int64_t>(op, idx, constant_data)) {
-        const auto& et = get_input_const_element_type(op, idx, constant_data);
-        out.reset(new TResult(make_bounds_vec(et, *lowers, *lowers)));
-    } else {
-        ov::Tensor lb, ub;
-        std::tie(lb, ub) = ov::evaluate_both_bounds(op->get_input_source_output(idx));
-        if (lb && ub) {
-            const auto& et = op->get_input_element_type(idx);
-            auto lowers = std::make_shared<op::v0::Constant>(lb.get_element_type(), lb.get_shape(), lb.data())
-                              ->cast_vector<int64_t>();
-            auto uppers = std::make_shared<op::v0::Constant>(ub.get_element_type(), ub.get_shape(), ub.data())
-                              ->cast_vector<int64_t>();
-            out.reset(new TResult(make_bounds_vec(et, lowers, uppers)));
-        }
-    }
-    return out;
-}
-
 /**
  * \brief Make sliced dimension for input dimension by step from start to stop bounds.
  *
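
The ov::util::is_max / ov::util::is_min helpers that replace ov::internal above are not shown in this diff; a stand-in consistent with the deleted definitions:

    #include <limits>
    // Assumed shape of the relocated helpers (mirrors the removed ov::internal versions).
    template <class T, class U = T>
    constexpr bool is_max(const T& value) {
        return std::numeric_limits<U>::max() == value;  // true when value equals U's maximum
    }
    template <class T, class U = T>
    constexpr bool is_min(const T& value) {
        return std::numeric_limits<U>::min() == value;  // true when value equals U's minimum
    }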

View File

@@ -7,19 +7,17 @@
 #include <cstdint>

 #include "dimension_util.hpp"
-#include "openvino/core/validation_util.hpp"
 #include "openvino/op/space_to_batch.hpp"
-#include "openvino/opsets/opset2.hpp"
 #include "utils.hpp"

 namespace ov {
 namespace op {
 namespace v1 {

-template <class TShape>
-std::vector<TShape> shape_infer(const SpaceToBatch* op,
-                                const std::vector<TShape>& input_shapes,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const SpaceToBatch* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
     using namespace ov::util;
     using TVal = typename TShape::value_type::value_type;
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
@@ -29,10 +27,10 @@ std::vector<TShape> shape_infer(const SpaceToBatch* op,
     const auto& pads_begin_shape = input_shapes[2];
     const auto& pads_end_shape = input_shapes[3];

-    auto inputs_same_ps = pads_begin_shape;
+    auto inputs_same_ps = static_cast<TRShape>(pads_begin_shape);
     NODE_VALIDATION_CHECK(
         op,
-        TShape::merge_into(inputs_same_ps, pads_end_shape) && TShape::merge_into(inputs_same_ps, block_shape),
+        TRShape::merge_into(inputs_same_ps, pads_end_shape) && TRShape::merge_into(inputs_same_ps, block_shape),
         "block_shape, pads_begin and pads_end inputs must have the same shape. Got: ",
         block_shape,
         ", ",
@@ -54,10 +52,10 @@ std::vector<TShape> shape_infer(const SpaceToBatch* op,
                           data_rank_size,
                           ")");

-    TShape out_shape;
+    TRShape out_shape;
     out_shape.reserve(data_rank_size);

-    auto blocks = get_input_const_data_as<TShape, int64_t>(op, 1, constant_data);
+    auto blocks = get_input_const_data_as<TShape, int64_t>(op, 1, ta);
     if (blocks) {
         TVal block_prod = std::accumulate(begin(*blocks), end(*blocks), int64_t(1), std::multiplies<int64_t>());
         out_shape.push_back(data_shape[0] * block_prod);
@@ -65,13 +63,13 @@ std::vector<TShape> shape_infer(const SpaceToBatch* op,
         out_shape.emplace_back(dim::inf_bound);
     }

-    std::vector<int64_t> pads_begin, pads_end;
-    if (blocks && get_data_as_int64<TShape>(2, op, pads_begin, constant_data) &&
-        get_data_as_int64<TShape>(3, op, pads_end, constant_data)) {
+    auto pads_begin = get_input_const_data_as<TShape, int64_t>(op, 2, ta);
+    auto pads_end = get_input_const_data_as<TShape, int64_t>(op, 3, ta);
+    if (blocks && pads_begin && pads_end) {
         for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) {
             NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0");
-            const auto padded_dim = data_shape[idx] + static_cast<TVal>(pads_begin[idx] + pads_end[idx]);
+            const auto padded_dim = data_shape[idx] + static_cast<TVal>((*pads_begin)[idx] + (*pads_end)[idx]);
             const auto divisor = static_cast<TVal>((*blocks)[idx]);

             if (static_cast<int64_t>(padded_dim.get_max_length()) == dim::inf_bound) {
@@ -91,15 +89,6 @@ std::vector<TShape> shape_infer(const SpaceToBatch* op,
         return {PartialShape::dynamic()};
     }
 }
-
-template <class TShape>
-void shape_infer(const SpaceToBatch* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = shape_infer(op, input_shapes, constant_data);
-}
-
 }  // namespace v1
 }  // namespace op
 }  // namespace ov
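
A worked sketch of the inference above (illustrative; assumes value extraction falls back to Constant inputs when the default accessor holds no tensors):

    #include <openvino/op/constant.hpp>
    #include <openvino/op/parameter.hpp>
    #include <openvino/op/space_to_batch.hpp>
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 2, 8, 8});
    const auto block = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 2, 2});
    const auto pads = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 0, 0});
    const auto op = std::make_shared<ov::op::v1::SpaceToBatch>(data, block, pads, pads);
    const auto shapes = ov::op::v1::shape_infer(op.get(),
                                                std::vector<ov::PartialShape>{{1, 2, 8, 8}, {4}, {4}, {4}});
    // shapes[0] == ov::PartialShape{4, 2, 4, 4}: batch = 1 * (1*1*2*2), spatial dims divided by block.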

View File

@@ -14,8 +14,8 @@ namespace ov {
 namespace op {
 namespace v0 {

-template <class TShape>
-std::vector<TShape> shape_infer(const ov::op::v0::SpaceToDepth* op, const std::vector<TShape>& input_shapes) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const ov::op::v0::SpaceToDepth* op, const std::vector<TShape>& input_shapes) {
     using TVal = typename TShape::value_type::value_type;
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
@@ -31,7 +31,7 @@ std::vector<TShape> shape_infer(const ov::op::v0::SpaceToDepth* op, const std::v
         const auto& block_size = op->get_block_size();
         NODE_VALIDATION_CHECK(op, block_size > 0, "The block size must be greater than 0 ", block_size);

-        auto out_shape = data_shape;
+        TRShape out_shape = data_shape;
         out_shape[1] *= static_cast<TVal>(std::pow(block_size, data_shape.size() - spatial_dim_offset));
         const auto divisor = static_cast<TVal>(block_size);
@@ -44,14 +44,6 @@ std::vector<TShape> shape_infer(const ov::op::v0::SpaceToDepth* op, const std::v
         return {PartialShape::dynamic()};
     }
 }
-
-template <class TShape>
-void shape_infer(const ov::op::v0::SpaceToDepth* op,
-                 const std::vector<TShape>& input_shapes,
-                 std::vector<TShape>& output_shapes) {
-    output_shapes = shape_infer(op, input_shapes);
-}
-
 }  // namespace v0
 }  // namespace op
 }  // namespace ov
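
The arithmetic above in numbers (illustrative; block_size = 2, BLOCKS_FIRST mode, data assumed to be a 4-D Parameter defined elsewhere):

    // out[1] = C * block_size^(rank - 2); each spatial dim divides by block_size:
    // {1, 3, 4, 6} -> {1, 3 * 2 * 2, 4 / 2, 6 / 2} == {1, 12, 2, 3}
    const auto op = std::make_shared<ov::op::v0::SpaceToDepth>(
        data, ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
    const auto shapes = ov::op::v0::shape_infer(op.get(), std::vector<ov::PartialShape>{{1, 3, 4, 6}});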

View File

@@ -4,9 +4,8 @@

 #pragma once

-#include <openvino/core/validation_util.hpp>
-#include <openvino/op/split.hpp>
-
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/split.hpp"
 #include "utils.hpp"

 namespace ov {
@@ -23,37 +22,33 @@ namespace v1 {
  *
  * \param op           Split operator pointer.
  * \param input_shapes Split input shapes.
- * \param output_shapes Split output shapes.
- * \param constant_data Map of constant data.
+ * \param ta           Tensor accessor to constant data.
  */
-template <typename T>
-void shape_infer(const Split* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
+template <typename T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const Split* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
     NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2));
-    output_shapes.clear();

     const auto& data_ps = input_shapes[0];
     const auto& axis_ps = input_shapes[1];

     NODE_VALIDATION_CHECK(op, axis_ps.rank().compatible(0), "'axis' input must be a scalar. Got: ", axis_ps);

-    auto each_output_shape = data_ps;
+    TRShape each_output_shape = data_ps;
     const auto data_rank = data_ps.rank();

-    std::vector<int64_t> axes_values;
+    auto axes_values = get_input_const_data_as<TRShape, int64_t>(op, 1, ta);
    const auto& num_splits = op->get_num_splits();

-    if (get_data_as_int64<T>(1, op, axes_values, constant_data) && data_rank.is_static()) {
+    if (axes_values && data_rank.is_static()) {
         NODE_VALIDATION_CHECK(op,
-                              axes_values.size() == 1,
+                              axes_values->size() == 1,
                               "a scalar axis value is expected. Got: ",
-                              axes_values.size(),
+                              axes_values->size(),
                               " axes");

         OPENVINO_SUPPRESS_DEPRECATED_START
-        auto axis = ov::normalize_axis(op, axes_values[0], data_rank);
+        auto axis = ov::normalize_axis(op, (*axes_values)[0], data_rank);
         OPENVINO_SUPPRESS_DEPRECATED_END

         if (data_ps[axis].is_static()) {
@@ -90,7 +85,7 @@ void shape_infer(const Split* op,
         each_output_shape = ov::PartialShape::dynamic(data_ps.rank());
     }

-    output_shapes.resize(num_splits, each_output_shape);
+    return {num_splits, each_output_shape};
 }

 }  // namespace v1
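
Sketch of the new return contract (illustrative; split_op and input_shapes assumed defined, axis supplied as a Constant so the default accessor can read it):

    // e.g. data {6, 12}, axis = 1, num_splits = 3:
    const auto shapes = ov::op::v1::shape_infer(split_op.get(), input_shapes);
    // shapes.size() == 3; each entry == ov::PartialShape{6, 4}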

View File

@@ -18,22 +18,20 @@ namespace v0 {
  *
  * \param op           Squeeze operator pointer.
  * \param input_shapes Squeeze input shapes.
- * \param output_shapes Output shapes result of squeeze shape inference.
- * \param constant_data Map of constant data.
+ * \param ta           Tensor accessor to constant data.
  */
-template <class T>
-void shape_infer(const Squeeze* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
-
-    NODE_VALIDATION_CHECK(op, output_shapes.size() == 1);
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const Squeeze* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
+    using DimType = typename T::value_type;

     const auto number_of_inputs = input_shapes.size();
     OPENVINO_ASSERT(!input_shapes.empty());

     const auto& arg_shape = input_shapes[0];
     const auto& arg_rank = arg_shape.rank();
+    auto output_shapes = std::vector<TRShape>(1);
     auto& output_shape = output_shapes[0];

     std::unique_ptr<std::set<int64_t>> unique_axes;
@@ -51,12 +49,12 @@ void shape_infer(const Squeeze* op,
         std::vector<int64_t> axes;
         if (arg_rank.is_static() && axes_shape.is_static()) {
-            if (get_data_as_int64<T>(1, op, axes, constant_data)) {
+            if (auto axes = get_input_const_data_as<TRShape, int64_t>(op, 1, ta)) {
                 // The values of `axes` input are known
                 OPENVINO_SUPPRESS_DEPRECATED_START
-                normalize_axes(op, arg_rank.get_length(), axes);
+                normalize_axes(op, arg_rank.get_length(), *axes);
                 OPENVINO_SUPPRESS_DEPRECATED_END
-                unique_axes.reset(new std::set<int64_t>(axes.cbegin(), axes.cend()));
+                unique_axes.reset(new std::set<int64_t>(axes->cbegin(), axes->cend()));
             } else if (arg_rank.get_length() > 0 && shape_size(axes_shape.to_shape()) == 1) {
                 // The `axes` input must be a Parameter with single element to ensure uniqueness of axes
                 // only rank is deduced
@@ -71,7 +69,7 @@ void shape_infer(const Squeeze* op,
                     " doesn't contain squeezable dimension,"
                     " but axes input is expected to have one element.");
                 output_shape = PartialShape::dynamic(arg_rank.get_length() - 1);
-                return;
+                return output_shapes;
             }
         }
     } else {
@@ -122,6 +120,7 @@ void shape_infer(const Squeeze* op,
     } else {
         output_shape = PartialShape::dynamic();
     }
+    return output_shapes;
 }

 }  // namespace v0
 }  // namespace op
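
Illustrative behaviour of the migrated Squeeze inference (squeeze_op and input_shapes assumed defined, axes given as a Constant readable via the default accessor):

    // data {1, 4, 1, 8}, axes {0, 2}:
    const auto shapes = ov::op::v0::shape_infer(squeeze_op.get(), input_shapes);
    // shapes[0] == ov::PartialShape{4, 8}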

View File

@@ -14,15 +14,14 @@ namespace ov {
 namespace op {
 namespace v1 {

-template <class T>
-void shape_infer(const StridedSlice* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
-    using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
+template <class T, class TRShape = result_shape_t<T>>
+std::vector<TRShape> shape_infer(const StridedSlice* op,
+                                 const std::vector<T>& input_shapes,
+                                 const ITensorAccessor& ta = make_tensor_accessor()) {
+    using DimType = typename T::value_type;

     static constexpr std::array<char const*, 3> shape_names{"Begin", "End", "Strides"};

-    NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 4) && output_shapes.size() == 1);
+    NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 4));

     const auto& input_shape = input_shapes[0];
@@ -38,12 +37,12 @@ void shape_infer(const StridedSlice* op,
     const auto& begin_shape = input_shapes[1];
     const auto& end_shape = input_shapes[2];

+    auto output_shapes = std::vector<TRShape>(1);
     // it is not possible to define output shape if input data shape rank is undefined
     // even the lengths of begin, end, or strides are defined
     if (input_shape.rank().is_dynamic()) {
         output_shapes[0] = ov::PartialShape::dynamic();
-        return;
+        return output_shapes;
     }
     auto input_rank = input_shape.size();
@@ -59,12 +58,12 @@ void shape_infer(const StridedSlice* op,
     };

     // compute constant values of begin, end, and strides if possible
-    const auto begin = slice::get_input_bounds<T>(op, 1, constant_data);
-    const auto end = slice::get_input_bounds<T>(op, 2, constant_data);
+    const auto begin = get_input_bounds<TRShape, int64_t>(op, 1, ta);
+    const auto end = get_input_bounds<TRShape, int64_t>(op, 2, ta);

     std::unique_ptr<std::vector<int64_t>> strides;
     if (input_shapes.size() > 3) {
-        strides = get_input_const_data_as<T, int64_t>(op, 3, constant_data);
+        strides = get_input_const_data_as<TRShape, int64_t>(op, 3, ta);
     } else if (begin) {
         // generate default strides
         strides.reset(new std::vector<int64_t>(begin->size(), 1));
@@ -90,7 +89,7 @@ void shape_infer(const StridedSlice* op,
     // if number of axes is undefined we cannot say about output rank
     if (number_axes < 0) {
         output_shapes[0] = ov::PartialShape::dynamic();
-        return;
+        return output_shapes;
     }

     // collect indices of axes by which the shape needs to be changed
@@ -196,6 +195,7 @@ void shape_infer(const StridedSlice* op,
     for (; input_shape_idx < input_shape.rank().get_length(); ++input_shape_idx) {
         out.push_back(input_shape[input_shape_idx]);
     }
+    return output_shapes;
 }

 }  // namespace v1
 }  // namespace op

View File

@@ -31,6 +31,7 @@ struct GetK {
         return static_cast<T>(k);
     }
 };
+}  // namespace util

 /**
  * \brief TopK shape inference
  *
@@ -42,10 +43,10 @@ struct GetK {
  *
  * \return Vector of output shapes.
  */
-template <class TShape>
-std::vector<TShape> shape_infer(const util::TopKBase* op,
-                                const std::vector<TShape>& input_shapes,
-                                const std::map<size_t, HostTensorPtr>& constant_data = {}) {
+template <class TShape, class TRShape = result_shape_t<TShape>>
+std::vector<TRShape> shape_infer(const util::TopKBase* op,
+                                 const std::vector<TShape>& input_shapes,
+                                 const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
     using TDim = typename TShape::value_type;
     using TDimValue = typename TDim::value_type;
@@ -66,21 +67,21 @@ std::vector<TShape> shape_infer(const util::TopKBase* op,
     const auto& k_shape = input_shapes[1];
     NODE_SHAPE_INFER_CHECK(op, input_shapes, k_shape.rank().compatible(0), "The 'K' input must be a scalar.");

-    auto output_shape = input_shape;
+    TRShape output_shape = input_shape;
     if (input_shape.rank().is_static()) {
         OPENVINO_SUPPRESS_DEPRECATED_START
         const auto normalized_axis = ov::normalize_axis(op, op->get_provided_axis(), input_shape.rank());
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto& dim_axis = output_shape[normalized_axis];

-        if (auto k_as_shape = get_input_const_data_as_shape<TShape>(op, 1, constant_data, GetK<TDimValue>(op))) {
-            NODE_SHAPE_INFER_CHECK(op,
-                                   input_shapes,
-                                   k_as_shape->size() == 1,
-                                   "Only one value (scalar) should be provided as the 'K' input to TopK",
-                                   " (got ",
-                                   k_as_shape->size(),
-                                   " elements).");
+        if (auto k_as_shape =
+                get_input_const_data_as_shape<TRShape>(op, 1, tensor_accessor, util::GetK<TDimValue>(op))) {
+            NODE_VALIDATION_CHECK(op,
+                                  k_as_shape->size() == 1,
+                                  "Only one value (scalar) should be provided as the 'K' input to TopK",
+                                  " (got ",
+                                  k_as_shape->size(),
+                                  " elements).");

             const auto& k = (*k_as_shape)[0];
             if (k.is_static()) {
@@ -105,39 +106,7 @@ std::vector<TShape> shape_infer(const util::TopKBase* op,
         }
     }

-    return std::vector<TShape>(2, output_shape);
+    return {2, output_shape};
 }
-}  // namespace util
-
-namespace v1 {
-/**
- * \brief TopK shape inference
- *
- * \tparam TShape Type of shape.
- *
- * \param op            Pointer to TopK operator.
- * \param input_shapes  Input shapes of TopK.
- * \param output_shapes Output shapes of TopK.
- * \param constant_data Map of constant data. Default empty.
- */
-template <typename T>
-void shape_infer(const TopK* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = util::shape_infer(op, input_shapes, constant_data);
-}
-}  // namespace v1
-
-namespace v3 {
-template <typename T>
-void shape_infer(const TopK* op,
-                 const std::vector<T>& input_shapes,
-                 std::vector<T>& output_shapes,
-                 const std::map<size_t, HostTensorPtr>& constant_data = {}) {
-    output_shapes = util::shape_infer(op, input_shapes, constant_data);
-}
-}  // namespace v3
-
 }  // namespace op
 }  // namespace ov
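
With the v1/v3 wrappers removed, both TopK versions are assumed to resolve through the single overload above, since v1::TopK and v3::TopK derive from util::TopKBase (sketch; topk_op and input_shapes assumed defined):

    // Two outputs (values and indices) share one shape; dim[axis] becomes k when known.
    const auto shapes = ov::op::shape_infer(topk_op.get(), input_shapes);
    // shapes.size() == 2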

Some files were not shown because too many files have changed in this diff.