Migrate ReduceL1, ReduceL2 to new API (#19622)

* Migrate ReduceL1, ReduceL2 to new API
- add some new shape utils to replace the deprecated ngraph helpers

* Add missing include

* Remove debug message

* Hide helper functions from public API
Pawel Raasz 2023-09-11 05:17:57 +02:00 committed by GitHub
parent 51d77cb59f
commit 5833e7d55d
12 changed files with 282 additions and 171 deletions

View File

@@ -26,5 +26,11 @@ OPENVINO_API Shape make_dynamic_shape();
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API bool is_dynamic_shape(const Shape& s);
OPENVINO_API Shape reduce(const Shape& input, const AxisSet& axes);
OPENVINO_API Shape reduce(const Shape& input, const AxisSet& axes, const bool keep_dims);
OPENVINO_API std::vector<size_t> reduce(const std::vector<size_t>& input, const AxisSet& axes);
OPENVINO_API Shape reduce_keep_dims(const Shape& input, const AxisSet& axes);
} // namespace util
} // namespace ov
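A minimal usage sketch of the new reduce helpers declared above (illustrative values; the include path is assumed from the header shown here):

#include "openvino/core/shape_util.hpp"  // assumed path of the header above

void shape_reduce_example() {
    const ov::Shape in{2, 3, 4};
    const ov::AxisSet axes{1};

    const auto squeezed = ov::util::reduce(in, axes);            // Shape{2, 4}
    const auto kept     = ov::util::reduce_keep_dims(in, axes);  // Shape{2, 1, 4}
    const auto same     = ov::util::reduce(in, axes, true);      // keep_dims=true, also Shape{2, 1, 4}
}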

View File

@@ -28,9 +28,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v4

View File

@@ -27,9 +27,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v4

View File

@@ -0,0 +1,26 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/axis_set.hpp"
#include "openvino/core/rank.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {
namespace op {
namespace util {
/**
* @brief Get the normalized axes as ov::AxisSet from raw tensor data.
*
* @param node A node pointer used for detailed description if normalization fails.
* @param tensor Tensor with axes for normalization.
* @param rank Rank value to normalize axes.
* @return Normalized axes as set.
*/
AxisSet get_normalized_axes_from_tensor(const Node* const node, const Tensor& tensor, const Rank& rank);
} // namespace util
} // namespace op
} // namespace ov
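A hedged usage sketch of this helper; the Parameter only supplies a node for error reporting and a rank, and the values are illustrative:

#include <memory>
#include "openvino/op/parameter.hpp"
#include "openvino/op/util/axes_util.hpp"

void normalized_axes_example() {
    const auto node = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1});

    // Axes tensor holding {-1, 0}; negative axes are normalized against the rank.
    ov::Tensor axes_tensor(ov::element::i64, ov::Shape{2});
    axes_tensor.data<int64_t>()[0] = -1;
    axes_tensor.data<int64_t>()[1] = 0;

    const auto axes = ov::op::util::get_normalized_axes_from_tensor(node.get(), axes_tensor, ov::Rank(3));
    // axes == ov::AxisSet{0, 2}
}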

View File

@@ -7,33 +7,55 @@
#include <cmath>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/utils/type_util.hpp"
#include "shape_util.hpp"
namespace ov {
namespace reference {
template <typename T>
void reduce_l1(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
std::fill(out, out + shape_size(out_shape), T(0));
namespace func {
template <class T, typename std::enable_if<std::is_unsigned<T>::value>::type* = nullptr>
constexpr T abs(const T num) {
return num;
}
template <class T, typename std::enable_if<std::is_signed<T>::value || ov::is_floating_point<T>()>::type* = nullptr>
T abs(const T num) {
return std::abs(num);
}
} // namespace func
/**
* @brief Reference implementation of ReduceL1 operator.
*
* @param in Input iterator to data.
* @param out Output iterator to results.
* @param in_shape Input shape.
* @param reduction_axes Axes on which reduction is applied.
*/
template <class InputIt, class OutputIt>
void reduce_l1(InputIt in, OutputIt out, const Shape& in_shape, const AxisSet& reduction_axes) {
using T = typename std::iterator_traits<OutputIt>::value_type;
static_assert(std::is_same<typename std::iterator_traits<InputIt>::value_type, T>::value,
"Assume in and out same type.");
const auto out_shape = ov::util::reduce(in_shape, reduction_axes);
std::fill(out, std::next(out, shape_size(out_shape)), T(0));
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
for (const Coordinate& in_coord : input_transform) {
constexpr uint64_t idx_init = 0;
const auto out_coord = ov::util::reduce(in_coord, reduction_axes);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const size_t in_idx = std::inner_product(in_coord.begin(), in_coord.end(), in_strides.begin(), idx_init);
const size_t out_idx = std::inner_product(out_coord.begin(), out_coord.end(), out_strides.begin(), idx_init);
out[out_idx] = out[out_idx] + std::abs(arg[in_idx]);
out[out_idx] += func::abs(in[in_idx]);
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov
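A small worked example of the iterator-based reference above (a sketch; the include path is taken from the diff):

#include <vector>
#include "openvino/reference/reduce_l1.hpp"

void reduce_l1_example() {
    const ov::Shape in_shape{2, 2};
    const std::vector<float> in{1.f, -2.f, 3.f, -4.f};
    std::vector<float> out(2);  // reduced shape over axis 1 is {2}

    ov::reference::reduce_l1(in.data(), out.data(), in_shape, ov::AxisSet{1});
    // out == {3.f, 7.f}: |1| + |-2| and |3| + |-4|
}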

View File

@@ -4,39 +4,51 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
#include "shape_util.hpp"
namespace ov {
namespace reference {
template <typename T>
void reduce_l2(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
std::fill(out, out + shape_size(out_shape), T(0));
/**
* @brief Reference implementation of ReduceL2 operator.
*
* @param in Input iterator to data.
* @param out Output iterator to results.
* @param in_shape Input shape.
* @param reduction_axes Axes on which reduction is applied.
*/
template <class InputIt, class OutputIt>
void reduce_l2(InputIt in, OutputIt out, const Shape& in_shape, const AxisSet& reduction_axes) {
using T = typename std::iterator_traits<OutputIt>::value_type;
static_assert(std::is_same<typename std::iterator_traits<InputIt>::value_type, T>::value,
"Assume in and out same type.");
const auto out_shape = ov::util::reduce(in_shape, reduction_axes);
const auto out_last = std::next(out, shape_size(out_shape));
std::fill(out, out_last, T(0));
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
for (const Coordinate& in_coord : input_transform) {
constexpr uint64_t idx_init = 0;
const auto out_coord = ov::util::reduce(in_coord, reduction_axes);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const size_t in_idx = std::inner_product(in_coord.begin(), in_coord.end(), in_strides.begin(), idx_init);
const size_t out_idx = std::inner_product(out_coord.begin(), out_coord.end(), out_strides.begin(), idx_init);
out[out_idx] = out[out_idx] + arg[in_idx] * arg[in_idx];
out[out_idx] += in[in_idx] * in[in_idx];
}
std::transform(out, out + shape_size(out_shape), out, [](T elem) {
return sqrt(elem);
std::transform(out, out_last, out, [](T elem) {
return static_cast<T>(std::sqrt(elem));
});
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov
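And the matching sketch for the L2 variant, which accumulates squares and applies std::sqrt at the end (illustrative values):

#include <vector>
#include "openvino/reference/reduce_l2.hpp"

void reduce_l2_example() {
    const ov::Shape in_shape{2, 2};
    const std::vector<float> in{3.f, 4.f, 6.f, 8.f};
    std::vector<float> out(2);

    ov::reference::reduce_l2(in.data(), out.data(), in_shape, ov::AxisSet{1});
    // out == {5.f, 10.f}: sqrt(9 + 16) and sqrt(36 + 64)
}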

View File

@@ -1,16 +1,42 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <openvino/core/validation_util.hpp>
#include <openvino/op/util/arithmetic_reductions_keep_dims.hpp>
#include <openvino/op/util/logical_reduction_keep_dims.hpp>
#include <openvino/opsets/opset1.hpp>
#pragma once
#include "openvino/core/validation_util.hpp"
#include "openvino/op/util/arithmetic_reductions_keep_dims.hpp"
#include "openvino/op/util/logical_reduction_keep_dims.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace util {
template <class T>
result_shape_t<T> reduce_shape(const T& input_shape, std::vector<int64_t>& axes, const bool keep_dims) {
if (keep_dims) {
result_shape_t<T> result = input_shape;
result = input_shape;
for (auto&& axis : axes) {
result[axis] = 1;
}
return result;
} else {
const auto s = input_shape.size();
result_shape_t<T> result;
result.reserve(s);
for (size_t axis = 0; axis < s; ++axis) {
if (std::find(axes.begin(), axes.end(), axis) == axes.end()) {
result.emplace_back(input_shape[axis]);
}
}
return result;
}
}
} // namespace util
template <class TShape, class TRShape = result_shape_t<TShape>>
std::vector<TRShape> reduce_shape_infer(const util::ReductionBase* op,
bool keep_dims,
@@ -38,21 +64,7 @@ std::vector<TRShape> reduce_shape_infer(const util::ReductionBase* op,
ov::normalize_axes(op, data_rank.get_length(), *axes_val);
OPENVINO_SUPPRESS_DEPRECATED_END
if (keep_dims) {
output_shapes.push_back(data_shape);
auto& output_shape = output_shapes[0];
for (const auto& axis : *axes_val) {
output_shape[axis] = 1;
}
} else {
output_shapes.resize(1);
auto& output_shape = output_shapes[0];
for (size_t i = 0; i < data_shape.size(); ++i) {
if (std::find(axes_val->begin(), axes_val->end(), i) == axes_val->end()) {
output_shape.push_back(data_shape[i]);
}
}
}
output_shapes.push_back(util::reduce_shape(data_shape, *axes_val, keep_dims));
} else {
if (keep_dims) {
output_shapes.push_back(ov::PartialShape::dynamic(data_shape.rank()));
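For reference, a hedged sketch of how the extracted util::reduce_shape helper behaves on a static-rank shape; it lives in a private shape-inference header, so the call site is illustrative only:

void reduce_shape_example() {
    const ov::PartialShape data_shape{4, 5, 6};
    std::vector<int64_t> axes{0, 2};

    const auto kept    = ov::op::util::reduce_shape(data_shape, axes, true);   // {1, 5, 1}
    const auto dropped = ov::op::util::reduce_shape(data_shape, axes, false);  // {5}
}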

View File

@@ -98,8 +98,8 @@ OPENVINO_SUPPRESS_DEPRECATED_END
*
* \return Object of TResult with data from tensor.
*/
template <class T, class TResult = std::vector<T>, class UnaryOperation>
TResult get_tensor_data_as(const Tensor& t, UnaryOperation&& func) {
template <class T, class TResult = std::vector<T>, class UnaryOperation = ov::util::Cast<T>>
TResult get_tensor_data_as(const Tensor& t, UnaryOperation&& func = ov::util::Cast<T>()) {
return get_raw_data_as<T, TResult>(t.get_element_type(),
t.data(),
t.get_size(),
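The change above adds a default for UnaryOperation, so callers can drop the explicit cast functor. A hedged before/after sketch, where t is assumed to be an ov::Tensor holding axes as i64:

const auto axes_old = ov::get_tensor_data_as<int64_t>(t, ov::util::Cast<int64_t>());  // previously required
const auto axes_new = ov::get_tensor_data_as<int64_t>(t);                             // Cast<int64_t> is now the default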

View File

@@ -2,84 +2,71 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_l1.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_l1.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/core/validation_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_l1.hpp"
#include "reduce_shape_inference.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
op::v4::ReduceL1::ReduceL1(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
namespace reduce_l1 {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_l1(in0.data<T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_l1
namespace v4 {
ReduceL1::ReduceL1(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v4::ReduceL1::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceL1::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v4_ReduceL1_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v4::ReduceL1>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceL1>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_l1 {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::reduce_l1(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_sum(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, bf16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reducel1_sum, f32, arg, out, axes, keep_dims);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace reduce_l1
bool op::v4::ReduceL1::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v4_ReduceL1_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return reduce_l1::evaluate_sum(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<i32, i64, bf16, f16, f32>::apply<reduce_l1::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v4::ReduceL1::has_evaluate() const {
bool ReduceL1::has_evaluate() const {
OV_OP_SCOPE(v4_ReduceL1_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::bf16:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::bf16:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v4
} // namespace op
} // namespace ov
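The Evaluate struct above follows the element-visitor pattern of the new API: element::NoAction<bool> supplies a default visit() that returns false, and element::IfTypeOf<...>::apply<Action>(type, args...) calls Action::visit<ET>(args...) only when the runtime element type is in the list. A self-contained toy sketch of that mechanism (not the real ReduceL1 code; element_visitor.hpp is an internal core header):

#include <algorithm>
#include "element_visitor.hpp"            // internal OpenVINO core header
#include "openvino/runtime/tensor.hpp"

struct FillWithOne : ov::element::NoAction<bool> {
    using ov::element::NoAction<bool>::visit;  // default: unsupported types return false

    template <ov::element::Type_t ET>
    static result_type visit(ov::Tensor& t) {
        using T = ov::fundamental_type_for<ET>;  // e.g. f32 -> float, i32 -> int32_t
        std::fill_n(t.data<T>(), t.get_size(), T{1});
        return true;
    }
};

void visitor_example() {
    ov::Tensor t(ov::element::f32, ov::Shape{4});
    using namespace ov::element;
    const bool handled = IfTypeOf<f32, i32>::apply<FillWithOne>(t.get_element_type(), t);  // true, t filled with 1
    const bool skipped = IfTypeOf<f32, i32>::apply<FillWithOne>(u8, t);                    // u8 not listed, returns false
}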

View File

@@ -2,80 +2,68 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_l2.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_l2.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_l2.hpp"
#include "reduce_shape_inference.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
op::v4::ReduceL2::ReduceL2(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
namespace reduce_l2 {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_l2(in0.data<T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_l2
namespace v4 {
ReduceL2::ReduceL2(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v4::ReduceL2::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceL2::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v4_ReduceL2_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v4::ReduceL2>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<op::v4::ReduceL2>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_l2 {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::reduce_l2(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_reduce_l2(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_reduce_l2, bf16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_l2, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_l2, f32, arg, out, axes, keep_dims);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace reduce_l2
bool op::v4::ReduceL2::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceL2::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v4_ReduceL2_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return reduce_l2::evaluate_reduce_l2(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<bf16, f16, f32>::apply<reduce_l2::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v4::ReduceL2::has_evaluate() const {
bool ReduceL2::has_evaluate() const {
OV_OP_SCOPE(v4_ReduceL2_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::bf16:
case ngraph::element::f16:
case ngraph::element::f32:
case element::bf16:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v4
} // namespace op
} // namespace ov
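From the caller's side, the migration means evaluate() now takes ov::TensorVector instead of HostTensorVector. A hedged end-to-end sketch with illustrative values:

#include <algorithm>
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_l2.hpp"
#include "openvino/runtime/tensor.hpp"

void evaluate_reduce_l2_example() {
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 2});
    const auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
    const auto reduce_op = std::make_shared<ov::op::v4::ReduceL2>(data, axes, false);  // keep_dims = false

    ov::Tensor in(ov::element::f32, ov::Shape{2, 2});
    const float values[] = {3.f, 4.f, 6.f, 8.f};
    std::copy_n(values, 4, in.data<float>());
    ov::Tensor ax(ov::element::i64, ov::Shape{1});
    ax.data<int64_t>()[0] = 1;

    ov::TensorVector outputs{ov::Tensor(ov::element::f32, ov::Shape{2})};
    const ov::TensorVector inputs{in, ax};
    reduce_op->evaluate(outputs, inputs);  // outputs[0] holds {5, 10}, the row-wise L2 norms
}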

View File

@@ -0,0 +1,22 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/util/axes_util.hpp"
#include "openvino/core/validation_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace util {
AxisSet get_normalized_axes_from_tensor(const Node* const node, const Tensor& tensor, const Rank& rank) {
const auto axes = ov::get_tensor_data_as<int64_t>(tensor, ov::util::Cast<int64_t>());
OPENVINO_SUPPRESS_DEPRECATED_START
return {normalize_axes(node->get_friendly_name(), axes, rank)};
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace util
} // namespace op
} // namespace ov

View File

@@ -77,6 +77,30 @@ PartialShape ngraph::inject_pairs(const PartialShape& shape,
}
namespace ov {
template <class TContainer, class TAxes>
TContainer reduce_container(const TContainer& input, const TAxes& axes) {
TContainer result;
const auto input_size = input.size();
result.reserve(input_size);
for (size_t axis = 0; axis < input_size; ++axis) {
if (std::find(axes.begin(), axes.end(), axis) == axes.end()) {
result.emplace_back(input[axis]);
}
}
return result;
}
template <class TContainer, class TAxes>
TContainer replace_container(const TContainer& input, const TAxes& axes) {
auto result = input;
for (auto&& axis : axes) {
result[axis] = 1;
}
return result;
}
namespace util {
Shape make_dynamic_shape() {
return Shape{0, std::numeric_limits<size_t>::max()};
@@ -88,5 +112,21 @@ bool is_dynamic_shape(const Shape& s) {
OPENVINO_SUPPRESS_DEPRECATED_END
return s == dyn_shape;
}
Shape reduce(const Shape& input, const AxisSet& axes) {
return ov::reduce_container(input, axes);
}
Shape reduce(const Shape& input, const AxisSet& axes, const bool keep_dims) {
return keep_dims ? reduce_keep_dims(input, axes) : reduce(input, axes);
}
std::vector<size_t> reduce(const std::vector<size_t>& input, const AxisSet& axes) {
return ov::reduce_container(input, axes);
}
Shape reduce_keep_dims(const Shape& input, const AxisSet& axes) {
return ov::replace_container(input, axes);
}
} // namespace util
} // namespace ov