[core] Migrate reduce ops max min mean prod sum evaluate to new API (#19756)

* Migrate ReduceL1, ReduceL2 to new API
- add some new utils which are deprecated

* Hide helper functions from public API

* Migrate reductions ops to new API

* Migrate get_constant_from_source to dev API

* Rename ref max to reduce_max

* Rename ref min to reduce_min

* Rename ref mean to reduce_mean

* Rename ref sum to reduce_sum

* Rename ref product to reduce_prod
- minor optimization in ReduceProd operator

* Restore custom isfinite for ov float types

* Fix type name in reduce_max.hpp

* Add missing include in shape_util.hpp

* Make count same type as data type in reduce mean

* Correct reduce sum doxy comment
This commit is contained in:
Pawel Raasz 2023-09-28 12:12:21 +02:00 committed by GitHub
parent f9678a285c
commit 197e954846
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 567 additions and 663 deletions

View File

@ -38,5 +38,14 @@ OPENVINO_API int64_t clip(const int64_t& value, const int64_t& min, const int64_
///
/// \return Constant node or nullptr if unable to constantfold the subgraph
OPENVINO_API std::shared_ptr<op::v0::Constant> constantfold_subgraph(const Output<Node>& subgraph_sink);
/**
* @brief Runs an estimation of source tensor. If it succeeded to calculate both bounds and
* they are the same returns Constant operation from the resulting bound, otherwise nullptr.
*
* @param source Node output used to get its tensor data as constant.
* @return Shared pointer to constant data or nullptr.
*/
OPENVINO_API std::shared_ptr<op::v0::Constant> get_constant_from_source(const Output<Node>& source);
} // namespace util
} // namespace ov

View File

@ -26,9 +26,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;

View File

@ -24,9 +24,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

View File

@ -26,9 +26,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;

View File

@ -27,9 +27,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;

View File

@ -73,9 +73,7 @@ public:
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

View File

@ -8,8 +8,8 @@
#include <numeric>
#include "openvino/core/shape.hpp"
#include "openvino/reference/mean.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/reduce_mean.hpp"
#include "openvino/reference/reduce_sum.hpp"
namespace ov {
namespace reference {
@ -38,11 +38,11 @@ void group_normalization(const T* const data,
const auto group_begin = data + n * batch_size + g * group_size;
const auto group_end = group_begin + group_size;
std::vector<T> mean_value(1);
mean(group_begin, mean_value.data(), Shape{group_size}, {0});
reduce_mean(group_begin, mean_value.data(), Shape{group_size}, {0});
T mean = mean_value[0];
T variance = 0, err = 0;
for_each(group_begin, group_end, [&](const T d) {
return details::kahan_summation(static_cast<T>(pow(d - mean, 2)), err, variance);
variance = details::kahan_summation(static_cast<T>(pow(d - mean, 2)), variance, err);
});
variance /= group_size;
const T standard_deviation = sqrt(variance + eps);

View File

@ -7,8 +7,8 @@
#include <cmath>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/max.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/reduce_max.hpp"
#include "openvino/reference/reduce_sum.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
@ -21,7 +21,7 @@ void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes)
auto temp_max = std::vector<T>(temp_elements, 0);
auto temp_sum = std::vector<T>(temp_elements, 0);
max(arg, temp_max.data(), shape, axes);
reduce_max(arg, temp_max.data(), shape, axes);
CoordinateTransform transform(shape);
CoordinateTransform temp_transform(temp_shape);
@ -31,7 +31,7 @@ void log_softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes)
static_cast<T>(std::exp(arg[transform.index(coord)] - temp_max[temp_transform.index(temp_coord)]));
}
sum(out, temp_sum.data(), shape, axes);
reduce_sum(out, temp_sum.data(), shape, axes);
for (const Coordinate& coord : transform) {
Coordinate temp_coord = ngraph::reduce(coord, axes, true);

View File

@ -1,46 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <limits>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
// NOTE(review): legacy reduction kernel removed by this commit; superseded by
// reduce_max in openvino/reference/reduce_max.hpp.
template <typename T>
void max(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
// Seed the output with the smallest representable value so any input compares greater.
T minval = std::numeric_limits<T>::lowest();
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
std::fill(out, out + shape_size(out_shape), minval);
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
// Row-major linear offsets of the source and destination elements.
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const T x = arg[in_idx];
const T max = out[out_idx];
if (x > max) {
out[out_idx] = x;
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov

View File

@ -1,58 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <map>
#include <numeric>
#include <vector>
#include "ngraph/shape_util.hpp"
#include "ngraph/type/bfloat16.hpp"
#include "ngraph/type/float16.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
// NOTE(review): legacy reduction kernel removed by this commit; superseded by
// reduce_mean in openvino/reference/reduce_mean.hpp.
template <typename T>
void mean(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
// Kahan compensation terms, one per output element.
std::vector<T> cs(shape_size(out_shape), 0);
std::fill(out, out + shape_size(out_shape), T(0));
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
// Counts how many input elements were accumulated into each output index.
std::map<size_t, int> index_to_count_map;
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
details::kahan_summation(arg[in_idx], cs[out_idx], out[out_idx]);
if (index_to_count_map.find(out_idx) == index_to_count_map.end()) {
index_to_count_map[out_idx] = 1;
} else {
index_to_count_map[out_idx]++;
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
// Turn the accumulated sums into means.
for (size_t i = 0; i < shape_size(out_shape); ++i) {
auto count = index_to_count_map[i];
out[i] = out[i] / count;
}
}
} // namespace reference
} // namespace ov

View File

@ -1,51 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <limits>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
#ifdef _WIN32
# undef min
#endif
namespace ov {
namespace reference {
// NOTE(review): legacy reduction kernel removed by this commit; superseded by
// reduce_min in openvino/reference/reduce_min.hpp.
template <typename T>
void min(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
// NOTE(review): despite its name, `minval` holds the LARGEST seed (+inf when
// available, otherwise max()) so that any input element compares smaller.
T minval =
std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
std::fill(out, out + shape_size(out_shape), minval);
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
const T x = arg[in_idx];
const T min = out[out_idx];
if (x < min) {
out[out_idx] = x;
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov

View File

@ -10,11 +10,11 @@
#include "openvino/reference/add.hpp"
#include "openvino/reference/divide.hpp"
#include "openvino/reference/mean.hpp"
#include "openvino/reference/multiply.hpp"
#include "openvino/reference/reduce_mean.hpp"
#include "openvino/reference/reduce_sum.hpp"
#include "openvino/reference/sqrt.hpp"
#include "openvino/reference/subtract.hpp"
#include "openvino/reference/sum.hpp"
namespace ov {
namespace reference {
@ -28,13 +28,13 @@ void mvn(const T* arg,
const double eps) {
auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true);
std::vector<T> tmp_buffer(shape_size(in_shape));
mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY);
if (normalize_variance) {
multiply(out, out, tmp_buffer.data(), shape_size(in_shape));
std::vector<T> mean_value(shape_size(reduced_shape));
mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes);
reduce_mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes);
add(mean_value.data(),
std::vector<T>(shape_size(reduced_shape), static_cast<T>(eps)).data(),
@ -58,13 +58,13 @@ void mvn_6(const T* arg,
op::MVNEpsMode eps_mode) {
auto reduced_shape = ngraph::reduce(in_shape, reduction_axes, true);
std::vector<T> tmp_buffer(shape_size(in_shape));
mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
reduce_mean(arg, tmp_buffer.data(), in_shape, reduction_axes);
subtract(arg, tmp_buffer.data(), out, in_shape, reduced_shape, op::AutoBroadcastType::NUMPY);
if (normalize_variance) {
multiply(out, out, tmp_buffer.data(), shape_size(in_shape));
std::vector<T> mean_value(shape_size(reduced_shape));
mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes);
reduce_mean(tmp_buffer.data(), mean_value.data(), in_shape, reduction_axes);
if (eps_mode == op::MVNEpsMode::INSIDE_SQRT) {
add(mean_value.data(),

View File

@ -7,7 +7,7 @@
#include <ngraph/op/normalize_l2.hpp>
#include "openvino/reference/autobroadcast_binop.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/reduce_sum.hpp"
namespace ov {
namespace reference {
@ -38,7 +38,7 @@ void normalize_l2(const T* data,
}
std::vector<T> sum_data(shape_size(reduce_shape));
sum(sqr_data.data(), sum_data.data(), data_shape, reduction_axes);
reduce_sum(sqr_data.data(), sum_data.data(), data_shape, reduction_axes);
autobroadcast_binop(data,
sum_data.data(),
out,

View File

@ -1,39 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
// NOTE(review): legacy reduction kernel removed by this commit; superseded by
// reduce_prod in openvino/reference/reduce_prod.hpp.
template <typename T>
void product(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
OPENVINO_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
// Multiplicative identity as the accumulator seed.
std::fill(out, out + shape_size(out_shape), T(1));
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
out[out_idx] = out[out_idx] * arg[in_idx];
}
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov

View File

@ -9,7 +9,7 @@
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/abs.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/reduce_sum.hpp"
#include "openvino/reference/utils/type_util.hpp"
namespace ov {

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <limits>
#include <numeric>
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/utils/coordinate_index.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
/**
 * @brief Reference implementation of ReduceMax operator.
 *
 * Keeps, for every output element, the maximum over all input elements that
 * reduce onto it.
 *
 * @param in              Input pointer to data.
 * @param out             Output pointer to results.
 * @param in_shape        Input shape.
 * @param reduction_axes  Axes on which reduction is applied.
 */
template <class T>
void reduce_max(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
    const auto out_shape = util::reduce(in_shape, reduction_axes);
    // Seed the whole output with the smallest representable value so any input wins the comparison.
    std::fill_n(out, shape_size(out_shape), std::numeric_limits<T>::lowest());

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(out_shape);

    CoordinateTransformBasic input_transform(in_shape);
    for (const auto& in_coord : input_transform) {
        const auto src = in[coordinate_offset(in_coord, in_strides)];
        auto& dst = out[coordinate_offset(util::reduce(in_coord, reduction_axes), out_strides)];
        if (dst < src) {
            dst = src;
        }
    }
}
} // namespace reference
} // namespace ov

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/reduce_sum.hpp"
namespace ov {
namespace reference {
/**
 * @brief Reference implementation of ReduceMean operator.
 *
 * Computes the sum over the reduced axes, then divides each result by the
 * number of input elements that contributed to it.
 *
 * @param in              Input pointer to data.
 * @param out             Output pointer to results.
 * @param in_shape        Input shape.
 * @param reduction_axes  Axes on which reduction is applied.
 */
template <class T>
void reduce_mean(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
    reduce_sum(in, out, in_shape, reduction_axes);

    const auto out_size = shape_size(util::reduce(in_shape, reduction_axes));
    // Every output element aggregates the same number of input elements.
    const auto count = static_cast<T>(shape_size(in_shape) / out_size);
    for (auto it = out; it != std::next(out, out_size); ++it) {
        *it = *it / count;
    }
}
} // namespace reference
} // namespace ov

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <limits>
#include <numeric>
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/utils/coordinate_index.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
/**
 * @brief Reference implementation of ReduceMin operator.
 *
 * Keeps, for every output element, the minimum over all input elements that
 * reduce onto it.
 *
 * @param in              Input pointer to data.
 * @param out             Output pointer to results.
 * @param in_shape        Input shape.
 * @param reduction_axes  Axes on which reduction is applied.
 */
template <class T>
void reduce_min(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
    // Seed with +infinity when the type has one, otherwise with the largest
    // finite value, so any input element compares smaller.
    constexpr auto init_value =
        std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
    const auto out_shape = util::reduce(in_shape, reduction_axes);
    std::fill_n(out, shape_size(out_shape), init_value);

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(out_shape);

    CoordinateTransformBasic input_transform(in_shape);
    for (const auto& in_coord : input_transform) {
        const auto src = in[coordinate_offset(in_coord, in_strides)];
        auto& dst = out[coordinate_offset(util::reduce(in_coord, reduction_axes), out_strides)];
        if (src < dst) {
            dst = src;
        }
    }
}
} // namespace reference
} // namespace ov

View File

@ -0,0 +1,42 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include "openvino/core/shape_util.hpp"
#include "openvino/reference/utils/coordinate_index.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
/**
 * @brief Reference implementation of ReduceProduct operator.
 *
 * @param in              Input pointer to data.
 * @param out             Output pointer to results.
 * @param in_shape        Input shape.
 * @param reduction_axes  Axes on which reduction is applied.
 */
template <typename T>
void reduce_prod(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
    const auto out_shape = util::reduce(in_shape, reduction_axes);
    // Multiplicative identity as the accumulator seed.
    std::fill(out, out + shape_size(out_shape), T(1));

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(out_shape);

    CoordinateTransformBasic input_transform(in_shape);
    for (const auto& in_coord : input_transform) {
        const auto out_coord = util::reduce(in_coord, reduction_axes);

        // Row-major linear offsets of the source and destination elements.
        const auto in_idx = coordinate_offset(in_coord, in_strides);
        const auto out_idx = coordinate_offset(out_coord, out_strides);

        out[out_idx] *= in[in_idx];
    }
}
} // namespace reference
} // namespace ov

View File

@ -0,0 +1,94 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include "openvino/core/shape_util.hpp"
#include "openvino/core/type/bfloat16.hpp"
#include "openvino/core/type/float16.hpp"
#include "openvino/reference/utils/coordinate_index.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
#include "openvino/reference/utils/type_util.hpp"
namespace ov {
namespace reference {
namespace details {
// std::isfinite is only provided for the standard floating point types;
// this overload handles them directly.
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, bool>::type = true>
bool isfinite(T x) {
return std::isfinite(x);
}

// Overload for OpenVINO's 16-bit float wrappers (bfloat16/float16):
// widen to float and delegate to std::isfinite.
template <
typename T,
typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, float16>::value, bool>::type = true>
bool isfinite(T x) {
return std::isfinite(static_cast<float>(x));
}
/**
* @brief Performs one element summation based on Kahan algorithm to significantly reduce the numerical error
* (integral types).
*
* Integral addition is exact, so no compensation is needed; the compensation argument is ignored.
*
* @param in Value to add with previous value of summation.
* @param prev_sum Previous value of summation (accumulator).
* @return Compensate sum.
*/
template <class T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T kahan_summation(const T in, const T prev_sum, T&) {
return in + prev_sum;
}

/**
* @brief Performs one element summation based on Kahan algorithm to significantly reduce the numerical error
* (floating point types).
*
* @param in Value to add with previous value of summation.
* @param prev_sum Previous value of summation (accumulator).
* @param compensation Accumulates the summation error.
* @return Compensate sum.
*/
template <class T, typename std::enable_if<ov::is_floating_point<T>()>::type* = nullptr>
T kahan_summation(const T in, const T prev_sum, T& compensation) {
// Compensated step only for finite operands; inf/NaN bypass it so they propagate unchanged.
if (isfinite(in) && isfinite(prev_sum)) {
T temp = prev_sum + (in - compensation);
compensation = (temp - prev_sum) - (in - compensation);
return temp;
} else {
return in + prev_sum;
}
}
} // namespace details
/**
 * @brief Reference implementation of ReduceSum operator.
 *
 * Accumulates the input over the reduction axes, using compensated (Kahan)
 * summation for floating point types.
 *
 * @param in              Input pointer to data.
 * @param out             Output pointer to results.
 * @param in_shape        Input shape.
 * @param reduction_axes  Axes on which reduction is applied.
 */
template <typename T>
void reduce_sum(const T* in, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
    const auto out_shape = util::reduce(in_shape, reduction_axes);
    const auto out_size = shape_size(out_shape);

    // One running compensation term per output element.
    std::vector<T> compensations(out_size, T{0});
    std::fill_n(out, out_size, T{0});

    const auto in_strides = row_major_strides(in_shape);
    const auto out_strides = row_major_strides(out_shape);

    CoordinateTransformBasic input_transform(in_shape);
    for (const auto& in_coord : input_transform) {
        const auto in_idx = coordinate_offset(in_coord, in_strides);
        const auto out_idx = coordinate_offset(util::reduce(in_coord, reduction_axes), out_strides);
        out[out_idx] = details::kahan_summation(in[in_idx], out[out_idx], compensations[out_idx]);
    }
}
} // namespace reference
} // namespace ov

View File

@ -7,8 +7,8 @@
#include <cmath>
#include "ngraph/shape_util.hpp"
#include "openvino/reference/max.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/reduce_max.hpp"
#include "openvino/reference/reduce_sum.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
@ -20,7 +20,7 @@ void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) {
auto temp_elements = shape_size(temp_shape);
auto temp_ptr = new T[temp_elements];
max(arg, temp_ptr, shape, axes);
reduce_max(arg, temp_ptr, shape, axes);
CoordinateTransform transform(shape);
CoordinateTransform temp_transform(temp_shape);
@ -30,7 +30,7 @@ void softmax(const T* arg, T* out, const Shape& shape, const AxisSet& axes) {
std::exp(arg[transform.index(coord)] - temp_ptr[temp_transform.index(temp_coord)]);
}
sum(out, temp_ptr, shape, axes);
reduce_sum(out, temp_ptr, shape, axes);
for (const Coordinate& coord : transform) {
Coordinate temp_coord = ngraph::reduce(coord, axes, true);

View File

@ -1,84 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include "ngraph/shape_util.hpp"
#include "ngraph/type/bfloat16.hpp"
#include "ngraph/type/float16.hpp"
#include "openvino/reference/utils/coordinate_transform.hpp"
namespace ov {
namespace reference {
namespace details {
// Windows doesn't seem to like it if we directly use std::isfinite on integer
// types, so we will roll our own thing here.
// NOTE(review): legacy helper removed by this commit; superseded by details::isfinite
// in openvino/reference/reduce_sum.hpp.
// Standard floating point types: delegate to std::isfinite directly.
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, bool>::type = true>
bool is_finite(T x) {
return std::isfinite(x);
}

// OpenVINO 16-bit float wrappers: widen to float before the check.
template <
typename T,
typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, float16>::value, bool>::type = true>
bool is_finite(T x) {
return std::isfinite(static_cast<float>(x));
}

// Integral values are always finite (avoids calling std::isfinite on integers,
// which some platforms reject — see the note above this group).
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
bool is_finite(T /* x */) {
return true;
}
///
/// \brief Performs one element summation based on Kahan algorithm to
/// significantly reduce
/// the numerical error.
///
/// \param[in] elem Element to add into the accumulator.
/// \param compensation Variable that accumulates the error.
/// \param sum Result of compensated summation.
///
// NOTE(review): legacy helper removed by this commit; superseded by
// details::kahan_summation in openvino/reference/reduce_sum.hpp.
template <typename T>
void kahan_summation(const T& elem, T& compensation, T& sum) {
// Compensated step only for finite operands; inf/NaN fall back to plain addition.
if (is_finite(elem) && is_finite(sum)) {
T temp = sum + (elem - compensation);
compensation = (temp - sum) - (elem - compensation);
sum = temp;
} else {
sum = sum + elem;
}
}
} // namespace details
// NOTE(review): legacy reduction kernel removed by this commit; superseded by
// reduce_sum in openvino/reference/reduce_sum.hpp.
template <typename T>
void sum(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes) {
constexpr bool dont_keep_dims_in_output = false;
NGRAPH_SUPPRESS_DEPRECATED_START
const auto out_shape = ngraph::reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
// Kahan compensation terms, one per output element.
std::vector<T> cs(shape_size(out_shape), 0);
std::fill(out, out + shape_size(out_shape), T(0));
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform) {
const Coordinate output_coord = ngraph::reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
const size_t in_idx =
std::inner_product(input_coord.begin(), input_coord.end(), in_strides.begin(), uint64_t(0));
const size_t out_idx =
std::inner_product(output_coord.begin(), output_coord.end(), out_strides.begin(), uint64_t(0));
details::kahan_summation(arg[in_idx], cs[out_idx], out[out_idx]);
}
NGRAPH_SUPPRESS_DEPRECATED_END
}
} // namespace reference
} // namespace ov

View File

@ -10,8 +10,8 @@
#include "openvino/reference/broadcast.hpp"
#include "openvino/reference/matmul.hpp"
#include "openvino/reference/multiply.hpp"
#include "openvino/reference/reduce_sum.hpp"
#include "openvino/reference/reshape.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/reference/transpose.hpp"
#include "openvino/reference/utils/span.hpp"
@ -352,7 +352,7 @@ void reduce_input(ov::TensorVector& inputs,
auto output_ptr = ov::Tensor(input_ptr.get_element_type(), output_shape);
reference::sum<T>(input_ptr.data<T>(), output_ptr.data<T>(), input_shape, reduced_axes);
reference::reduce_sum(input_ptr.data<T>(), output_ptr.data<T>(), input_shape, reduced_axes);
// update a vector of inputs and input subscripts
inputs[input_ind] = output_ptr;
@ -595,7 +595,7 @@ void extract_diagonal(ov::TensorVector& inputs, std::vector<std::string>& input_
ov::op::AutoBroadcastType::NUMPY);
auto result = ov::Tensor(input_ptr.get_element_type(), result_shape);
reference::sum<T>(mul_output.data<T>(), result.data<T>(), mul_output.get_shape(), reduced_axes);
reference::reduce_sum(mul_output.data<T>(), result.data<T>(), mul_output.get_shape(), reduced_axes);
inputs[input_ind] = result;
input_subscripts[input_ind] = resultant_subscript;
}

View File

@ -14,6 +14,7 @@
#include "ov_optional.hpp"
#include "shape_infer_type_utils.hpp"
#include "tensor_data_accessor.hpp"
#include "validation_util.hpp"
namespace ov {
@ -277,10 +278,8 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
UnaryOperation&& func = ov::util::Cast<TData>()) {
if (auto t = tensor_accessor(idx)) {
return std::unique_ptr<TRes>(new TRes(get_tensor_data_as<TData, TRes>(t, std::forward<UnaryOperation>(func))));
OPENVINO_SUPPRESS_DEPRECATED_START
} else if (const auto& constant =
(idx < op->get_input_size()) ? ov::get_constant_from_source(op->input_value(idx)) : nullptr) {
OPENVINO_SUPPRESS_DEPRECATED_END
(idx < op->get_input_size()) ? ov::util::get_constant_from_source(op->input_value(idx)) : nullptr) {
const auto& et = constant->get_element_type();
const auto& shape = constant->get_shape();
return std::unique_ptr<TRes>(new TRes(get_raw_data_as<TData, TRes>(et,
@ -358,9 +357,7 @@ ov::optional<TShape> get_input_const_data_as_shape(const ov::Node* op,
inline element::Type get_input_const_element_type(const ov::Node* const op, size_t port, const ITensorAccessor& ta) {
if (auto t = ta(port)) {
return t.get_element_type();
OPENVINO_SUPPRESS_DEPRECATED_START
} else if (const auto& constant = ov::get_constant_from_source(op->input_value(port))) {
OPENVINO_SUPPRESS_DEPRECATED_END
} else if (const auto& constant = ov::util::get_constant_from_source(op->input_value(port))) {
return constant->get_element_type();
} else {
return element::undefined;

View File

@ -21,7 +21,7 @@ struct Evaluate : element::NoAction<bool> {
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_l1(in0.data<T>(), out.data<T>(), in0.get_shape(), reduction_axes);
reference::reduce_l1(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
@ -48,7 +48,7 @@ bool ReduceL1::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
using namespace ov::element;
return IfTypeOf<i32, i64, bf16, f16, f32>::apply<reduce_l1::Evaluate>(inputs[0].get_element_type(),
return IfTypeOf<bf16, f16, f32, i32, i64>::apply<reduce_l1::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);

View File

@ -20,7 +20,7 @@ struct Evaluate : element::NoAction<bool> {
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_l2(in0.data<T>(), out.data<T>(), in0.get_shape(), reduction_axes);
reference::reduce_l2(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};

View File

@ -2,98 +2,81 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_max.hpp"
#include "bound_evaluate.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/max.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/reference/max.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_max.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace reduce_max {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
OPENVINO_SUPPRESS_DEPRECATED_START
namespace maxop {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::max(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_max(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_max, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, f32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, u8, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_max, i8, arg, out, axes, keep_dims);
default:
rc = false;
break;
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_max(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
return rc;
}
} // namespace
} // namespace maxop
};
} // namespace reduce_max
namespace v1 {
op::v1::ReduceMax::ReduceMax(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
ReduceMax::ReduceMax(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceMax_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceMax>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceMax>(new_args.at(0), new_args.at(1), get_keep_dims());
}
bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceMax::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceMax_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return maxop::evaluate_max(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_max::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceMax::has_evaluate() const {
bool ReduceMax::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceMax_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case ngraph::element::i8:
case ngraph::element::u8:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
case element::i8:
case element::u8:
return true;
default:
break;
return false;
}
return false;
}
bool op::v1::ReduceMax::evaluate_lower(ov::TensorVector& output_values) const {
bool ReduceMax::evaluate_lower(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::ReduceMax::evaluate_upper(ov::TensorVector& output_values) const {
bool ReduceMax::evaluate_upper(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -2,87 +2,69 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_mean.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_mean.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/reference/mean.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_mean.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace reduce_mean {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
op::v1::ReduceMean::ReduceMean(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_mean(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_mean
namespace v1 {
ReduceMean::ReduceMean(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceMean::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceMean::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceMean_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceMean>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceMean>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace mean {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::mean(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_mean(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_mean, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_mean, f32, arg, out, axes, keep_dims);
default:
rc = false;
break;
}
return rc;
}
} // namespace
} // namespace mean
bool op::v1::ReduceMean::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceMean::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceMean_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return mean::evaluate_mean(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_mean::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceMean::has_evaluate() const {
bool ReduceMean::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceMean_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::f16:
case element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -2,97 +2,79 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_min.hpp"
#include "bound_evaluate.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/reference/min.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_min.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace reduce_min {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
OPENVINO_SUPPRESS_DEPRECATED_START
namespace minop {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, const bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::min(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_min(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, const bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_min, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, f32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, i8, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_min, u8, arg, out, axes, keep_dims);
default:
rc = false;
break;
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_min(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
return rc;
}
} // namespace
} // namespace minop
op::v1::ReduceMin::ReduceMin(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
};
} // namespace reduce_min
namespace v1 {
ReduceMin::ReduceMin(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceMin::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceMin_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceMin>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceMin>(new_args.at(0), new_args.at(1), get_keep_dims());
}
bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceMin::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceMin_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return minop::evaluate_min(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<f16, f32, i8, i32, i64, u8, u32, u64>::apply<reduce_min::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceMin::has_evaluate() const {
bool ReduceMin::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceMin_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i8:
case ngraph::element::u8:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i8:
case element::u8:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
bool op::v1::ReduceMin::evaluate_lower(ov::TensorVector& output_values) const {
bool ReduceMin::evaluate_lower(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::ReduceMin::evaluate_upper(ov::TensorVector& output_values) const {
bool ReduceMin::evaluate_upper(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -2,105 +2,90 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_prod.hpp"
#include "openvino/op/reduce_prod.hpp"
#include "bound_evaluate.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/reference/product.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_prod.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace reduce_prod {
namespace {
bool has_positive_bounds_on_data(const Node* const op) {
const auto& lb = op->get_input_tensor(0).get_lower_value();
const auto& ub = op->get_input_tensor(0).get_upper_value();
op::v1::ReduceProd::ReduceProd(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
return lb && ub && tensor_is_positive(lb) && tensor_is_positive(ub);
}
} // namespace
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_prod(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_prod
namespace v1 {
ReduceProd::ReduceProd(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceProd::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceProd::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceProd_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<ReduceProd>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceProd>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_prod {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::product(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_product(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_product, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_product, f32, arg, out, axes, keep_dims);
default:
rc = false;
break;
}
return rc;
} // namespace
} // namespace
} // namespace reduce_prod
bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceProd::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceProd_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return reduce_prod::evaluate_product(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_prod::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceProd::has_evaluate() const {
bool ReduceProd::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceProd_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
bool op::v1::ReduceProd::evaluate_lower(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub))
return false;
return default_lower_bound_evaluator(this, output_values);
bool ReduceProd::evaluate_lower(ov::TensorVector& output_values) const {
return reduce_prod::has_positive_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() &&
default_lower_bound_evaluator(this, output_values);
}
bool op::v1::ReduceProd::evaluate_upper(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub))
return false;
return default_upper_bound_evaluator(this, output_values);
bool ReduceProd::evaluate_upper(ov::TensorVector& output_values) const {
return reduce_prod::has_positive_bounds_on_data(this) && get_input_tensor(1).has_and_set_bound() &&
default_upper_bound_evaluator(this, output_values);
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -2,88 +2,69 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/op/reduce_sum.hpp"
#include <ngraph/validation_util.hpp>
#include "openvino/op/reduce_sum.hpp"
#include "element_visitor.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/shape_util.hpp"
#include "openvino/reference/sum.hpp"
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/axes_util.hpp"
#include "openvino/reference/reduce_sum.hpp"
using namespace std;
using namespace ngraph;
namespace ov {
namespace op {
namespace reduce_sum {
struct Evaluate : element::NoAction<bool> {
using element::NoAction<bool>::visit;
op::v1::ReduceSum::ReduceSum(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
template <element::Type_t ET>
static result_type visit(const Tensor& in0, Tensor& out, const AxisSet& reduction_axes) {
using T = fundamental_type_for<ET>;
reference::reduce_sum(in0.data<const T>(), out.data<T>(), in0.get_shape(), reduction_axes);
return true;
}
};
} // namespace reduce_sum
namespace v1 {
ReduceSum::ReduceSum(const Output<Node>& arg, const Output<Node>& reduction_axes, bool keep_dims)
: ArithmeticReductionKeepDims(arg, reduction_axes, keep_dims) {
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v1_ReduceSum_clone_with_new_inputs);
check_new_args_count(this, new_args);
return make_shared<ReduceSum>(new_args.at(0), new_args.at(1), get_keep_dims());
return std::make_shared<ReduceSum>(new_args.at(0), new_args.at(1), get_keep_dims());
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace reduce_sum {
namespace {
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
OPENVINO_SUPPRESS_DEPRECATED_START
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
OPENVINO_SUPPRESS_DEPRECATED_END
ov::reference::sum(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
bool evaluate_sum(const HostTensorPtr& arg, const HostTensorPtr& out, const AxisSet& axes, bool keep_dims) {
bool rc = true;
switch (arg->get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_reduce_sum, i32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, i64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, u32, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, u64, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, f16, arg, out, axes, keep_dims);
NGRAPH_TYPE_CASE(evaluate_reduce_sum, f32, arg, out, axes, keep_dims);
default:
rc = false;
break;
}
return rc;
} // namespace
} // namespace
} // namespace reduce_sum
bool op::v1::ReduceSum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool ReduceSum::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v1_ReduceSum_evaluate);
OPENVINO_SUPPRESS_DEPRECATED_START
OPENVINO_ASSERT(validate_host_tensor_vector(inputs, 2));
OPENVINO_ASSERT(validate_host_tensor_vector(outputs, 1));
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
const auto reduction_axes =
get_normalized_axes_from_tensor(inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
OPENVINO_SUPPRESS_DEPRECATED_END
const auto reduction_axes = get_normalized_axes_from_tensor(this, inputs[1], inputs[0].get_shape().size());
outputs[0].set_shape(ov::util::reduce(inputs[0].get_shape(), reduction_axes, get_keep_dims()));
return reduce_sum::evaluate_sum(inputs[0], outputs[0], reduction_axes, get_keep_dims());
using namespace ov::element;
return IfTypeOf<f16, f32, i32, i64, u32, u64>::apply<reduce_sum::Evaluate>(inputs[0].get_element_type(),
inputs[0],
outputs[0],
reduction_axes);
}
bool op::v1::ReduceSum::has_evaluate() const {
bool ReduceSum::has_evaluate() const {
OV_OP_SCOPE(v1_ReduceSum_has_evaluate);
switch (get_input_element_type(0)) {
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32:
case element::i32:
case element::i64:
case element::u32:
case element::u64:
case element::f16:
case element::f32:
return true;
default:
break;
return false;
}
return false;
}
} // namespace v1
} // namespace op
} // namespace ov

View File

@ -11,7 +11,7 @@ namespace ov {
namespace op {
namespace util {
AxisSet get_normalized_axes_from_tensor(const Node* const node, const Tensor& tensor, const Rank& rank) {
const auto axes = ov::get_tensor_data_as<int64_t>(tensor, ov::util::Cast<int64_t>());
const auto axes = ov::get_tensor_data_as<int64_t>(tensor);
OPENVINO_SUPPRESS_DEPRECATED_START
return {normalize_axes(node->get_friendly_name(), axes, rank)};

View File

@ -6,6 +6,7 @@
#include "openvino/op/constant.hpp"
#include "reduce_shape_inference.hpp"
#include "validation_util.hpp"
ov::op::util::ReductionBase::ReductionBase() = default;
@ -24,18 +25,16 @@ bool ov::op::util::ReductionBase::reduction_axes_constant() const {
}
const ov::AxisSet ov::op::util::ReductionBase::get_reduction_axes() const {
AxisSet axes;
OPENVINO_SUPPRESS_DEPRECATED_START
if (const auto& const_op = get_constant_from_source(input_value(1))) {
OPENVINO_SUPPRESS_DEPRECATED_END
if (const auto& const_op = ov::util::get_constant_from_source(input_value(1))) {
const auto const_data = const_op->cast_vector<int64_t>();
const auto input_data_rank = get_input_partial_shape(0).rank();
OPENVINO_SUPPRESS_DEPRECATED_START
const auto normalized_axes = ov::normalize_axes(get_friendly_name(), const_data, input_data_rank);
OPENVINO_SUPPRESS_DEPRECATED_END
axes = AxisSet{normalized_axes};
return {normalized_axes};
} else {
return {};
}
return axes;
}
void ov::op::util::ReductionBase::set_reduction_axes(const AxisSet& reduction_axes) {

View File

@ -1295,13 +1295,7 @@ std::shared_ptr<ngraph::op::v0::Constant> ngraph::get_constant_lowest_of_type(el
}
shared_ptr<ov::op::v0::Constant> ov::get_constant_from_source(const Output<Node>& source) {
if (!has_and_set_equal_bounds(source))
return nullptr;
if (const auto& c = ov::as_type_ptr<op::v0::Constant>(source.get_node_shared_ptr()))
return c;
const auto t = source.get_tensor().get_upper_value();
return std::make_shared<op::v0::Constant>(t.get_element_type(), t.get_shape(), t.data());
return ov::util::get_constant_from_source(source);
}
bool ngraph::validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) {
@ -1370,3 +1364,19 @@ std::shared_ptr<ov::op::v0::Constant> ov::util::constantfold_subgraph(const Outp
return nullptr;
return ov::as_type_ptr<op::v0::Constant>(outputs[subgraph_sink.get_index()].get_node_shared_ptr());
}
namespace ov {
namespace util {
using ov::op::v0::Constant;
std::shared_ptr<Constant> get_constant_from_source(const Output<Node>& source) {
if (const auto& c = ov::as_type_ptr<Constant>(source.get_node_shared_ptr())) {
return c;
} else if (has_and_set_equal_bounds(source)) {
return std::make_shared<Constant>(source.get_tensor().get_upper_value());
} else {
return {};
}
}
} // namespace util
} // namespace ov