From 392b67f082076c29676c5370939a2210f6feec45 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Thu, 30 Mar 2023 13:55:53 +0200 Subject: [PATCH] Fix pooling padding update (#16531) * Review adaptive max pool shape inference * Review AvgPool and MaxPool * Review convolution operator * Review GroupConvolution shape inference * Review ConvolutionBackpropData operator * Review GroupConvolutionBackpropData op * Review BinaryConvolution operator - add common bases for convolution ops - refactor convolution ops * Review DeformableConvolution operator * Use new convolution shape_infer in GPU * Fix build and test issues * Correctly set output spatial shape in default-constructed backprop convolutions * The convolution shape_infer now takes pads as parameters; the external padding can come from the operator or from another class's padding properties. shape_infer should not modify the operator's padding when called from a plugin * Apply code formatting * Fix padding validation and update * Max and Avg pool don't update op properties from plugin shape inference - use ShapeInferWithPadding for pooling operators * Remove unused function in shape_inference * Fix evaluates in MaxPool * Relax convolution shape infer inputs size check * Remove unused entryFallbackWithPadding class * Remove unused dilations variable * Remove unused resize_attributes from max_pool_base --------- Co-authored-by: mitruska --- .../include/avg_pool_shape_inference.hpp | 24 +-- .../include/convolution_shape_inference.hpp | 2 +- .../include/max_pool_shape_inference.hpp | 46 ++--- .../include/pooling_shape_inference_util.hpp | 161 ++++++++++++------ src/core/src/op/avg_pool.cpp | 2 +- src/core/src/op/max_pool.cpp | 12 +- .../utils/shape_inference/shape_inference.cpp | 66 +------ 7 files changed, 155 insertions(+), 158 deletions(-) diff --git a/src/core/shape_inference/include/avg_pool_shape_inference.hpp b/src/core/shape_inference/include/avg_pool_shape_inference.hpp index 577d014a6c9..fe6df898366 100644 --- 
a/src/core/shape_inference/include/avg_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/avg_pool_shape_inference.hpp @@ -36,23 +36,23 @@ inline void valid_dilated_kernel_with_padding(const v1::AvgPool* op, } // namespace pooling namespace v1 { - -template -std::vector shape_infer(const AvgPool* op, const std::vector& input_shapes) { +template +std::vector shape_infer(const AvgPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end, + const std::map& constant_data = {}) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); const auto& data_shape = input_shapes[0]; - const auto dilations = Strides(op->get_kernel().size(), 1); - pooling::update_and_validate_attributes(const_cast(op), data_shape, dilations); + auto num_spatial = dilations.size(); + pooling::resize_empty_padding(num_spatial, pads_begin, pads_end); + pooling::validate::padding(op, pads_begin, pads_end); + pooling::validate::attributes(op, data_shape, dilations); + pooling::apply_padding(op, data_shape, dilations, pads_begin, pads_end); - auto output_shape = pooling::out_shape_infer(op, data_shape, dilations); - return {output_shape}; -} - -template -void shape_infer(const AvgPool* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {pooling::out_shape_infer(op, data_shape, pads_begin, pads_end, dilations)}; } } // namespace v1 } // namespace op diff --git a/src/core/shape_inference/include/convolution_shape_inference.hpp b/src/core/shape_inference/include/convolution_shape_inference.hpp index e47ad7e1d88..b1bdeb4317e 100644 --- a/src/core/shape_inference/include/convolution_shape_inference.hpp +++ b/src/core/shape_inference/include/convolution_shape_inference.hpp @@ -16,7 +16,7 @@ std::vector shape_infer(const TFrowardConv* op, TContainer& pads_begin, TContainer& pads_end, const std::map& constant_data = {}) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); + 
NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); using namespace ov::util; const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes); diff --git a/src/core/shape_inference/include/max_pool_shape_inference.hpp b/src/core/shape_inference/include/max_pool_shape_inference.hpp index f6136016f6d..806cd3f3db3 100644 --- a/src/core/shape_inference/include/max_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/max_pool_shape_inference.hpp @@ -12,43 +12,47 @@ namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const MaxPool* op, const std::vector& input_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); +template +std::vector shape_infer(const MaxPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end, + const std::map& constant_data = {}) { const auto& data_shape = input_shapes[0]; - const auto dilations = Strides(op->get_kernel().size(), 1); - pooling::update_and_validate_attributes(const_cast(op), data_shape, dilations); + auto num_spatial = dilations.size(); + pooling::resize_empty_padding(num_spatial, pads_begin, pads_end); + pooling::validate::padding(op, pads_begin, pads_end); + pooling::validate::attributes(op, data_shape, dilations); + pooling::apply_padding(op, data_shape, dilations, pads_begin, pads_end); - return {pooling::out_shape_infer(op, data_shape, dilations)}; -} - -template -void shape_infer(const MaxPool* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {pooling::out_shape_infer(op, data_shape, pads_begin, pads_end, dilations)}; } } // namespace v1 namespace v8 { -template -std::vector shape_infer(const MaxPool* op, const std::vector& input_shapes) { +template +std::vector shape_infer(const MaxPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end, + const std::map& constant_data = {}) { NODE_VALIDATION_CHECK(op, 
input_shapes.size() == 1); const auto& data_shape = input_shapes[0]; + auto num_spatial = op->get_kernel().size(); auto dilations = op->get_dilations(); if (dilations.empty()) { - dilations.resize(op->get_kernel().size(), 1); + dilations.resize(num_spatial, 1); } - pooling::update_and_validate_attributes(const_cast(op), data_shape, dilations); + pooling::resize_empty_padding(num_spatial, pads_begin, pads_end); + pooling::validate::padding(op, pads_begin, pads_end); + pooling::validate::attributes(op, data_shape, dilations); + pooling::apply_padding(op, data_shape, dilations, pads_begin, pads_end); - return {2, pooling::out_shape_infer(op, data_shape, dilations)}; -} - -template -void shape_infer(const MaxPool* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {2, pooling::out_shape_infer(op, data_shape, pads_begin, pads_end, dilations)}; } } // namespace v8 } // namespace op diff --git a/src/core/shape_inference/include/pooling_shape_inference_util.hpp b/src/core/shape_inference/include/pooling_shape_inference_util.hpp index 17462a82edb..c3f69904eb1 100644 --- a/src/core/shape_inference/include/pooling_shape_inference_util.hpp +++ b/src/core/shape_inference/include/pooling_shape_inference_util.hpp @@ -12,8 +12,22 @@ namespace op { namespace pooling { constexpr size_t spatial_dim_offset = 2; +namespace validate { +template +void padding(const TOp* op, const TContainer& pads_begin, const TContainer& pads_end) { + const auto num_spatial = op->get_kernel().size(); + NODE_VALIDATION_CHECK(op, + pads_begin.size() == num_spatial, + "Expected pads_begin size to be equal to input size - 2. Got: ", + pads_begin.size()); + NODE_VALIDATION_CHECK(op, + pads_end.size() == num_spatial, + "Expected pads_end size to be equal to input size - 2. 
Got: ", + pads_end.size()); +} + template -void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Strides& dilations) { +void attributes(const TOp* op, const TShape& data_shape, const Strides& dilations) { const auto& data_rank = data_shape.rank(); NODE_VALIDATION_CHECK(op, @@ -22,25 +36,9 @@ void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Str data_shape); const auto& kernel = op->get_kernel(); - const auto& auto_pad = op->get_auto_pad(); const auto num_spatial = kernel.size(); const auto& strides = op->get_strides(); - if (auto_pad == PadType::VALID || op->get_pads_begin().empty()) { - op->set_pads_begin(Shape(num_spatial, 0)); - } - if (auto_pad == PadType::VALID || op->get_pads_end().empty()) { - op->set_pads_end(Shape(num_spatial, 0)); - } - - NODE_VALIDATION_CHECK(op, - op->get_pads_begin().size() == num_spatial, - "Expected pads_begin size to be equal to input size - 2. Got: ", - op->get_pads_begin().size()); - NODE_VALIDATION_CHECK(op, - op->get_pads_end().size() == num_spatial, - "Expected pads_end size to be equal to input size - 2. Got: ", - op->get_pads_end().size()); NODE_VALIDATION_CHECK(op, strides.size() == num_spatial, "Expected strides size to be equal to input size - 2. Got: ", @@ -50,32 +48,10 @@ void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Str "Expected dilations size to be equal to kernel size. Got: ", dilations.size()); - if (data_rank.is_static()) { - NODE_VALIDATION_CHECK(op, - num_spatial == (data_shape.size() - spatial_dim_offset), - "Expected kernel size to be equal to input size - 2. 
Got: ", - num_spatial); - - if (auto_pad == PadType::SAME_UPPER || auto_pad == PadType::SAME_LOWER) { - Shape pads_begin, pads_end; - pads_begin.reserve(num_spatial); - pads_end.reserve(num_spatial); - - auto data_dim = data_shape.cbegin() + spatial_dim_offset; - auto pad_begin_ins = std::back_inserter(pads_begin); - auto pad_end_ins = std::back_inserter(pads_end); - auto& pad_left = auto_pad == PadType::SAME_UPPER ? pad_begin_ins : pad_end_ins; - auto& pad_right = auto_pad == PadType::SAME_UPPER ? pad_end_ins : pad_begin_ins; - - for (size_t i = 0; i < num_spatial; ++i, ++pad_left, ++pad_right, ++data_dim) { - using namespace ov::util; - std::tie(*pad_left, *pad_right) = dim::padding(*data_dim, kernel[i], dilations[i], strides[i]); - } - - op->set_pads_begin(pads_begin); - op->set_pads_end(std::move(pads_end)); - } - } + NODE_VALIDATION_CHECK(op, + data_rank.is_dynamic() || num_spatial == (data_shape.size() - spatial_dim_offset), + "Expected kernel size to be equal to input size - 2. Got: ", + num_spatial); constexpr auto is_zero = cmp::Equal(0); NODE_VALIDATION_CHECK(op, @@ -87,6 +63,65 @@ void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Str "Kernel dilations has zero dimension(s). ", dilations); } +} // namespace validate + +/** + * @brief Resize paddings if empty to number of spatial dimensions. + * + * @param num_spatial Number of spatial dimensions. + * @param pads_begin Begin padding to resize. + * @param pads_end End padding to resize. + */ +template +void resize_empty_padding(const size_t num_spatial, TContainer& pads_begin, TContainer& pads_end) { + if (pads_begin.empty()) { + pads_begin.resize(num_spatial); + } + + if (pads_end.empty()) { + pads_end.resize(num_spatial); + } +} + +/** + * @brief Apply pooling operator padding depends on auto pad value. + * + * @param op Pointer to Pooling operator to apply padding. + * @param data_shape Shape infer data input shape. + * @param dilations Kernel dilations. 
+ * @param pads_begin Padding begin to update. + * @param pads_end Padding end to update. + */ +template +void apply_padding(const TOp* op, + const TShape& data_shape, + const Strides& dilations, + TContainer& pads_begin, + TContainer& pads_end) { + const auto& auto_pad = op->get_auto_pad(); + if (data_shape.rank().is_static() && (auto_pad == PadType::SAME_UPPER || auto_pad == PadType::SAME_LOWER)) { + const auto& kernel = op->get_kernel(); + const auto& strides = op->get_strides(); + const auto num_spatial = kernel.size(); + pads_begin.reserve(num_spatial); + pads_end.reserve(num_spatial); + + auto data_dim = data_shape.cbegin() + spatial_dim_offset; + auto pad_b = auto_pad == PadType::SAME_UPPER ? pads_begin.begin() : pads_end.begin(); + auto pad_e = auto_pad == PadType::SAME_UPPER ? pads_end.begin() : pads_begin.begin(); + + for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim) { + using namespace ov::util; + std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, kernel[i], dilations[i], strides[i]); + } + } else if (auto_pad == PadType::VALID) { + std::fill_n(pads_begin.begin(), pads_begin.size(), 0); + std::fill_n(pads_end.begin(), pads_end.size(), 0); + } else if (op->get_auto_pad() == op::PadType::EXPLICIT) { + std::copy(op->get_pads_begin().begin(), op->get_pads_begin().end(), pads_begin.begin()); + std::copy(op->get_pads_end().begin(), op->get_pads_end().end(), pads_end.begin()); + } +} template void valid_dilated_kernel_with_dim(const TOp* op, const size_t kernel, const TDim& dim, const size_t axis) { @@ -116,8 +151,23 @@ void valid_dilated_kernel_with_padding(const TOp* op, const size_t pad_end, const size_t axis) {} -template -TShape spatial_shape_infer(const TOp* op, const TShape& data_shape, const Strides& dilations) { +/** + * @brief Append spatial shape to the end of output shape for pooling operator shape inference result. + * + * @param op Pointer to pooling operator. + * @param data_shape Shape inference input pooling data shape. 
+ * @param pads_begin Pooling pads begin. + * @param pads_end Pooling pads end. + * @param dilations Kernel dilations. + * @param out_shape Output shape for appending the spatial shape of pooling + */ +template +void append_spatial_shape(const TOp* op, + const TShape& data_shape, + const TContainer& pads_begin, + const TContainer& pads_end, + const Strides& dilations, + TShape& out_shape) { using namespace ov::util; const auto spatial_num = data_shape.size() - spatial_dim_offset; const auto is_ceil_mode = op->get_rounding_type() == RoundingType::CEIL; @@ -126,12 +176,7 @@ TShape spatial_shape_infer(const TOp* op, const TShape& data_shape, const Stride using TDim = typename TShape::value_type; const auto& dim_divide = is_ceil_mode ? dim::ceil_div : dim::floor_div; - TShape out_shape; - out_shape.reserve(spatial_num); - auto data_dim = data_shape.cbegin() + spatial_dim_offset; - const auto& pads_begin = op->get_pads_begin(); - const auto& pads_end = op->get_pads_end(); const auto& kernel = op->get_kernel(); const auto& stride = op->get_strides(); @@ -155,15 +200,18 @@ TShape spatial_shape_infer(const TOp* op, const TShape& data_shape, const Stride out_shape.emplace_back(dim::inf_bound); } } - - return out_shape; } /** * @brief Shape inference helper used for pooling operators such Max Pool, Avg Pool. 
*/ -template -TShape out_shape_infer(const TOp* op, const TShape& data_shape, const Strides& dilations) { +template +TShape out_shape_infer(const TOp* op, + const TShape& data_shape, + const TContainer& pads_begin, + const TContainer& pads_end, + const Strides& dilations) { + const auto out_rank_size = spatial_dim_offset + op->get_kernel().size(); TShape out_shape; if (data_shape.rank().is_static()) { const auto& batch_size = data_shape[0]; @@ -174,8 +222,9 @@ TShape out_shape_infer(const TOp* op, const TShape& data_shape, const Strides& d channel_count.is_dynamic() || channel_count.get_length() > 0, "Channel count is zero."); - out_shape = spatial_shape_infer(op, data_shape, dilations); - out_shape.insert(out_shape.begin(), data_shape.begin(), data_shape.begin() + spatial_dim_offset); + out_shape.reserve(out_rank_size); + std::copy_n(data_shape.begin(), spatial_dim_offset, std::back_inserter(out_shape)); + pooling::append_spatial_shape(op, data_shape, pads_begin, pads_end, dilations, out_shape); } else { out_shape.insert(out_shape.begin(), spatial_dim_offset + op->get_kernel().size(), Dimension::dynamic()); } diff --git a/src/core/src/op/avg_pool.cpp b/src/core/src/op/avg_pool.cpp index 8e63d173144..a9edcaf18c7 100644 --- a/src/core/src/op/avg_pool.cpp +++ b/src/core/src/op/avg_pool.cpp @@ -47,7 +47,7 @@ bool ov::op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) { void ov::op::v1::AvgPool::validate_and_infer_types() { OV_OP_SCOPE(v1_AvgPool_validate_and_infer_types); - const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); + const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this), m_pads_begin, m_pads_end); set_output_type(0, get_input_element_type(0), output_shapes.front()); } diff --git a/src/core/src/op/max_pool.cpp b/src/core/src/op/max_pool.cpp index 13d98688c4d..68c31b26a16 100644 --- a/src/core/src/op/max_pool.cpp +++ b/src/core/src/op/max_pool.cpp @@ -41,7 +41,7 @@ bool 
ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) { void op::v1::MaxPool::validate_and_infer_types() { OV_OP_SCOPE(v1_MaxPool_validate_and_infer_types); - const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); + const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this), m_pads_begin, m_pads_end); set_output_type(0, get_input_element_type(0), output_shapes.front()); } @@ -108,7 +108,9 @@ bool evaluate_maxpool(const HostTensorPtr& arg, bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const { const auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; - auto out_shape = shape_infer(this, input_shapes).front(); + auto pads_begin = m_pads_begin; + auto pads_end = m_pads_end; + auto out_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); return maxpool::evaluate_maxpool(inputs[0], outputs[0], @@ -276,7 +278,7 @@ void op::v8::MaxPool::validate_and_infer_types() { m_axis = ngraph::normalize_axis(this, m_axis, input_shape.rank()); } - const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); + const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this), m_pads_begin, m_pads_end); set_output_type(0, get_input_element_type(0), output_shapes[0]); set_output_type(1, m_index_element_type, output_shapes[1]); } @@ -318,7 +320,9 @@ bool op::v8::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensor OV_OP_SCOPE(v8_MaxPool_evaluate); const auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; - auto out_shape = shape_infer(this, input_shapes).front(); + auto pads_begin = m_pads_begin; + auto pads_end = m_pads_end; + auto out_shape = shape_infer(this, input_shapes, pads_begin, pads_end).front(); return maxpool_v8::evaluate_maxpool(inputs[0], outputs[0], diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp 
b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp index f08c33d9352..8b4342e3be3 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp @@ -261,33 +261,6 @@ public: } }; -template -ov::CoordinateDiff convertPadding(const TContainer& newPads) { - return {newPads.begin(), newPads.end()}; -} - -template -class entryFallbackWithPadding : public entryFallback { -public: - using entryFallback::entryFallback; - - ov::CoordinateDiff pads_begin, pads_end; - - const ov::CoordinateDiff& get_pads_begin() override { - return pads_begin; - } - const ov::CoordinateDiff& get_pads_end() override { - return pads_end; - } - - void post_validate_and_infer_types(const std::shared_ptr& local_op) override { - auto node = dynamic_cast(local_op.get()); - OPENVINO_ASSERT(node); - pads_begin = convertPadding(node->get_pads_begin()); - pads_end = convertPadding(node->get_pads_end()); - } -}; - template class entryInterpolate : public entryBase { public: @@ -304,39 +277,6 @@ public: } }; -template -class ShapeInferWithPaddingConvert : public entryBase { -public: - ShapeInferWithPaddingConvert(std::shared_ptr node) - : entryBase{std::move(node)}, - m_pads_begin{}, - m_pads_end{} {} - - IShapeInferCommon::Result infer(const std::vector& input_shapes, - const std::map& constant_data) override { - auto out_shapes = shape_infer(static_cast(node.get()), input_shapes); - on_infer_exit(); - return {std::move(out_shapes), ShapeInferStatus::success}; - } - - const ov::CoordinateDiff& get_pads_begin() override { - return m_pads_begin; - } - - const ov::CoordinateDiff& get_pads_end() override { - return m_pads_end; - } - -protected: - void on_infer_exit() { - auto op = static_cast(node.get()); - m_pads_begin = convertPadding(op->get_pads_begin()); - m_pads_end = convertPadding(op->get_pads_end()); - } - - ov::CoordinateDiff m_pads_begin, m_pads_end; -}; - template class 
ShapeInferWithPadding : public entryBase { public: @@ -516,7 +456,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{ _OV_OP_SHAPE_INFER_REG(AdaptiveAvgPool, entryIOC), _OV_OP_SHAPE_INFER_REG(AdaptiveMaxPool, entryIOC), _OV_OP_SHAPE_INFER_REG(Assign, entryIO), - _OV_OP_SHAPE_INFER_REG(AvgPool, ShapeInferWithPaddingConvert), + _OV_OP_SHAPE_INFER_REG(AvgPool, ShapeInferWithPadding), _OV_OP_SHAPE_INFER_REG(BatchToSpace, entryIOC), _OV_OP_SHAPE_INFER_REG(BinaryConvolution, ShapeInferWithPadding), _OV_OP_SHAPE_INFER_REG(Broadcast, entryIOC), @@ -557,7 +497,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{ _OV_OP_SHAPE_INFER_REG(IRDFT, entryIOC), _OV_OP_SHAPE_INFER_REG(LSTMCell, entryIO), _OV_OP_SHAPE_INFER_REG(MatMul, entryIO), - _OV_OP_SHAPE_INFER_REG(MaxPool, ShapeInferWithPaddingConvert), + _OV_OP_SHAPE_INFER_REG(MaxPool, ShapeInferWithPadding), _OV_OP_SHAPE_INFER_REG(OneHot, entryIOC), _OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUCell, entryIO), _OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUSequence, entryIO), @@ -617,7 +557,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{ _OV_OP_SHAPE_INFER_REG(opset1::DetectionOutput, entryIO), _OV_OP_SHAPE_INFER_REG(opset1::Interpolate, entryIOC), _OV_OP_SHAPE_INFER_REG(opset1::LSTMCell, entryIO), - _OV_OP_SHAPE_INFER_REG(opset1::MaxPool, ShapeInferWithPaddingConvert), + _OV_OP_SHAPE_INFER_REG(opset1::MaxPool, ShapeInferWithPadding), _OV_OP_SHAPE_INFER_REG(opset1::Proposal, entryIO), _OV_OP_SHAPE_INFER_REG(opset1::Range, entryIOC), _OV_OP_SHAPE_INFER_REG(opset1::ShapeOf, entryIO),