Refactor shape inference factory (#15004)

* New static shape inference iface using ov::Tensors
- Add new shape inference factory
- Add helpers to create inference factory map entries
- Create map for IShapeInferCommon instead of if else switch
- Create new map for IStaticShapeInfer

* Re-factor tile shape inference to use new iface

* ov::default_label_evaluator uses ov::Tensor now

* Improve cmp::lt for mixed types unsigned and float

* Fix cpp lint issue

* Update using tile shape_inference in GPU plugin

* Do tile shape infer before repeats lock deletion

* Fix label type conversion to element type

* Rename shape infer transformation
to type utils and change namespace from ov::sh_infer_tr to ov::util

* Update shape inference utilities

* Add unit test for safe compare of values

* Update shape infer factory to be a template
and use unordered map

* Remove from_label_type as label_t can be used
by element::from<>
This commit is contained in:
Pawel Raasz
2023-01-26 07:44:13 +01:00
committed by GitHub
parent 8575ad690c
commit b44b4fcf2c
15 changed files with 625 additions and 345 deletions

View File

@@ -9,6 +9,7 @@
#include <unordered_set>
#include "openvino/core/dimension.hpp"
#include "openvino/core/type/element_type.hpp"
namespace ov {
/// \brief Special label value indicate no label set.

View File

@@ -34,6 +34,7 @@ public:
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
private:

View File

@@ -79,24 +79,21 @@ public:
};
/**
* \brief Compare two integers (a < b) in safe way against lossy integer conversion.
* \brief Compare two values (a < b) in safe way against lossy integer conversion.
*
* \tparam T Type of a value.
* \tparam U Type of b value.
*
* \param a Integer value.
* \param b Integer value.
* \param a Value a.
* \param b Value b.
*
* \return true if a less b otherwise false.
*/
template <
class T,
class U,
typename std::enable_if<(std::is_signed<T>::value && std::is_signed<U>::value) ||
(std::is_unsigned<T>::value && std::is_unsigned<U>::value) ||
// temporary to be able compare float element types
(std::is_floating_point<T>::value || std::is_floating_point<U>::value) ||
(std::is_same<T, float16>::value || std::is_same<U, float16>::value)>::type* = nullptr>
// Overload selected when both operands share a signedness domain:
// signed (or float16) with signed (or float16), or unsigned with unsigned.
// In these cases the built-in operator< involves no lossy signed/unsigned conversion.
template <class T,
class U,
typename std::enable_if<((std::is_signed<T>::value || std::is_same<T, float16>::value) &&
(std::is_signed<U>::value || std::is_same<U, float16>::value)) ||
(std::is_unsigned<T>::value && std::is_unsigned<U>::value)>::type* = nullptr>
constexpr bool lt(T a, U b) noexcept {
return a < b;
}
@@ -109,6 +106,14 @@ constexpr bool lt(T a, U b) noexcept {
return a < 0 ? true : static_cast<typename std::make_unsigned<T>::type>(a) < b;
}
/**
 * \brief Safe compare (a < b) where `a` is floating-point (or float16) and `b` is unsigned.
 *
 * A negative `a` is always less than any unsigned value; otherwise the
 * ordinary comparison is used.
 */
template <class T,
          class U,
          typename std::enable_if<std::is_unsigned<U>::value &&
                                  (std::is_floating_point<T>::value ||
                                   std::is_same<T, float16>::value)>::type* = nullptr>
constexpr bool lt(T a, U b) noexcept {
    return (a < 0) || (a < b);
}
template <class T,
class U,
typename std::enable_if<std::is_unsigned<T>::value && std::is_integral<U>::value &&
@@ -117,51 +122,59 @@ constexpr bool lt(T a, U b) noexcept {
return b < 0 ? false : a < static_cast<typename std::make_unsigned<U>::type>(b);
}
/**
 * \brief Safe compare (a < b) where `a` is unsigned and `b` is floating-point (or float16).
 *
 * No unsigned value is less than a negative `b`; otherwise the ordinary
 * comparison is used.
 */
template <class T,
          class U,
          typename std::enable_if<std::is_unsigned<T>::value &&
                                  (std::is_floating_point<U>::value ||
                                   std::is_same<U, float16>::value)>::type* = nullptr>
constexpr bool lt(T a, U b) noexcept {
    return (b >= 0) && (a < b);
}
/**
* \brief Compare two integers (a > b) in safe way against lossy integer conversion.
* \brief Compare two values (a > b) in safe way against lossy integer conversion.
*
* \tparam T Type of a value.
* \tparam U Type of b value.
*
* \param a Integer value.
* \param b Integer value.
* \param a Value a.
* \param b Value b.
*
* \return true if a > b otherwise false.
*/
template <class T, class U>
bool gt(T a, U b) noexcept {
constexpr bool gt(T a, U b) noexcept {
return lt(b, a);
}
/**
* \brief Compare two integers (a <= b) in safe way against lossy integer conversion.
* \brief Compare two values (a <= b) in safe way against lossy integer conversion.
*
* \tparam T Type of a value.
* \tparam U Type of b value.
*
* \param a Integer value.
* \param b Integer value.
* \param a Value a.
* \param b Value b.
*
* \return true if a <= b otherwise false.
*/
template <class T, class U>
bool le(T a, U b) noexcept {
constexpr bool le(T a, U b) noexcept {
return !gt(a, b);
}
/**
* \brief Compare two integers (a >= b) in safe way against lossy integer conversion.
* \brief Compare two values (a >= b) in safe way against lossy integer conversion.
*
* \tparam T Type of a value.
* \tparam U Type of b value.
*
* \param a Integer value.
* \param b Integer value.
* \param a Value a.
* \param b Value b.
*
* \return true if a >= b otherwise false.
*/
template <class T, class U>
bool ge(T a, U b) noexcept {
constexpr bool ge(T a, U b) noexcept {
return !lt(a, b);
}
} // namespace cmp

View File

@@ -0,0 +1,50 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "compare.hpp"
#include "openvino/core/except.hpp"
namespace ov {
namespace util {
/**
* \brief Transform tensor data by casting it to type T
*
* \tparam T Type of returned value.
*/
/** \brief Function object converting its argument to type T via static_cast. */
template <class T>
struct Cast {
    constexpr Cast() = default;

    /** \brief Converts the given value to the target type T. */
    template <class From>
    constexpr T operator()(const From value) const {
        return static_cast<T>(value);
    }
};
/**
* \brief Check if input data is within [std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max()] and then cast it to T.
*
* \tparam T Type of returned value and used to specified min, max of valid value range.
*
* \throws ov::AssertFailure if input value not in type range.
*/
// Function object that validates a value lies within an inclusive range before casting it to T.
template <class T>
struct InTypeRange {
// Inclusive bounds; defaults cover the full representable range of T.
const T m_min{std::numeric_limits<T>::lowest()}, m_max{std::numeric_limits<T>::max()};

constexpr InTypeRange() = default;

// Construct with custom inclusive bounds [min, max].
constexpr InTypeRange(const T& min, const T& max) : m_min{min}, m_max{max} {};

// Casts `u` to T; throws ov::AssertFailure (via OPENVINO_ASSERT) when `u` is outside [m_min, m_max].
template <class U>
T operator()(const U u) const {
OPENVINO_ASSERT(cmp::le(m_min, u) && cmp::le(u, m_max), "Value ", u, " not in range [", m_min, ":", m_max, "]");
return static_cast<T>(u);
}
};
} // namespace util
} // namespace ov

View File

@@ -94,12 +94,10 @@ void shape_infer(const Slice* op,
return;
}
constexpr auto cast_i64 = sh_infer::tr::Cast<int64_t>();
// compute constant values of begin, end, and strides if possible
const auto start = slice::get_input_bounds<T>(op, 1, constant_data);
const auto stop = slice::get_input_bounds<T>(op, 2, constant_data);
const auto steps = get_input_const_data_as<T, int64_t>(op, 3, constant_data, cast_i64);
const auto steps = get_input_const_data_as<T, int64_t>(op, 3, constant_data);
slice::AxesMap axes_map;
if (input_shapes.size() > 4) {
@@ -107,7 +105,7 @@ void shape_infer(const Slice* op,
input_shapes[4].compatible(start_shape),
"Slice `axes` input must have compatible shape with `start`, `stop`, `step` inputs.");
if (auto axes = get_input_const_data_as<T, int64_t>(op, 4, constant_data, cast_i64)) {
if (auto axes = get_input_const_data_as<T, int64_t>(op, 4, constant_data)) {
ov::normalize_axes(op, input_shape.rank().get_length(), *axes);
axes_map.add(*axes);
NODE_VALIDATION_CHECK(op, axes_map.is_valid, "Slice values in `axes` input must be unique.");

View File

@@ -233,8 +233,7 @@ std::unique_ptr<TResult> get_input_bounds(const ov::Node* op,
};
std::unique_ptr<TResult> out;
if (auto lowers =
op::get_input_const_data_as<TShape, int64_t>(op, idx, constant_data, sh_infer::tr::Cast<int64_t>())) {
if (auto lowers = op::get_input_const_data_as<TShape, int64_t>(op, idx, constant_data)) {
const auto& et = get_input_const_element_type(op, idx, constant_data);
out.reset(new TResult(make_bounds_vec(et, *lowers, *lowers)));
} else {

View File

@@ -64,7 +64,7 @@ void shape_infer(const StridedSlice* op,
std::unique_ptr<std::vector<int64_t>> strides;
if (input_shapes.size() > 3) {
strides = get_input_const_data_as<T, int64_t>(op, 3, constant_data, sh_infer::tr::Cast<int64_t>());
strides = get_input_const_data_as<T, int64_t>(op, 3, constant_data);
} else if (begin) {
// generate default strides
strides.reset(new std::vector<int64_t>(begin->size(), 1));

View File

@@ -12,24 +12,23 @@ namespace op {
namespace v0 {
template <class T>
void shape_infer(const Tile* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
std::vector<T> shape_infer(const Tile* op,
const std::vector<T>& input_shapes,
const std::map<size_t, std::reference_wrapper<const ov::Tensor>>& constant_data = {}) {
using TDim = typename T::value_type;
using TDimValue = typename TDim::value_type;
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& repeats_shape = input_shapes[1];
NODE_VALIDATION_CHECK(op, repeats_shape.rank().compatible(1), "Tile repeats must be of rank 1");
const auto& arg_shape = input_shapes[0];
auto& output_shape = output_shapes[0];
T output_shape;
// Get repeats and pre process values
auto negative_repeats_to_zero = [](const TDimValue v) -> TDimValue {
return std::max<TDimValue>(0, sh_infer::tr::InTypeRange<TDimValue>()(v));
return std::max<TDimValue>(0, ov::util::InTypeRange<TDimValue>()(v));
};
auto repeats = get_input_const_data_as_shape<T>(op, 1, constant_data, negative_repeats_to_zero);
@@ -37,21 +36,21 @@ void shape_infer(const Tile* op,
const auto& arg_rank = arg_shape.rank();
if (arg_rank.is_static() && repeats) {
const auto output_rank = std::max(arg_shape.size(), repeats->size());
std::vector<TDim> dims;
dims.reserve(output_rank);
output_shape.reserve(output_rank);
// add missing repeats
repeats->insert(repeats->begin(), output_rank - repeats->size(), TDim{1});
// insert missing input dimensions
auto rep_it = std::next(repeats->begin(), output_rank - arg_shape.size());
dims.insert(dims.begin(), repeats->begin(), rep_it);
output_shape.insert(output_shape.begin(), repeats->begin(), rep_it);
// calc repeated output dimensions
std::transform(arg_shape.begin(), arg_shape.end(), rep_it, std::back_inserter(dims), std::multiplies<TDim>());
output_shape = T(std::move(dims));
std::transform(arg_shape.begin(),
arg_shape.end(),
rep_it,
std::back_inserter(output_shape),
std::multiplies<TDim>());
} else if (arg_rank.is_static() && repeats_shape[0].is_static()) {
// unknown repeats but shape is 1-D static, any dim can be repeated (add missing dimension)
output_shape.resize(std::max<size_t>(arg_rank.get_length(), repeats_shape[0].get_length()));
@@ -59,6 +58,7 @@ void shape_infer(const Tile* op,
// can't deduce shape, set default value
output_shape = PartialShape::dynamic();
}
return {output_shape};
}
} // namespace v0
} // namespace op

View File

@@ -8,7 +8,7 @@
#include <openvino/opsets/opset1.hpp>
#include <type_traits>
#include "shape_infer_transformations.hpp"
#include "shape_infer_type_utils.hpp"
template <class OpType, class T>
void copy_shape_infer(const OpType* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
@@ -154,7 +154,7 @@ TResult get_raw_data_as(const element::Type_t et, const void* const ptr, const s
std::forward<UnaryOperation>(func));
} break;
default:
OPENVINO_ASSERT(false, "Not supported element type ", et);
OPENVINO_ASSERT(false, "Get raw data from tensor is not supported for element type: ", et);
};
return out;
}
@@ -177,6 +177,11 @@ TResult get_tensor_data_as(HostTensor& tv, UnaryOperation&& func) {
return get_tensor_data_as<T, TResult>(t, std::forward<UnaryOperation>(func));
}
// Overload for a raw HostTensor pointer: dereferences and forwards to the reference overload.
// NOTE(review): assumes `tv` is non-null — the caller must guarantee this.
template <class T, class TResult = std::vector<T>, class UnaryOperation>
TResult get_tensor_data_as(HostTensor* tv, UnaryOperation&& func) {
return get_tensor_data_as<T, TResult>(*tv, std::forward<UnaryOperation>(func));
}
/**
* \brief Get data from ov:tensor as object TResult.
*
@@ -207,6 +212,7 @@ namespace op {
* \tparam TShape Shape type which enabled this version (not ov::PartialShape)
* \tparam TData Type use to cast input's data.
* \tparam TRes Result type which has got default type as std::vector<TData>.
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)).
*
* \param op Pointer to operator.
@@ -219,15 +225,16 @@ namespace op {
template <class TShape,
class TData,
class TRes = std::vector<TData>,
class UnaryOperation,
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::Cast<TData>,
typename std::enable_if<!std::is_same<TShape, ov::PartialShape>::value>::type* = nullptr>
std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
size_t idx,
const std::map<size_t, HostTensorPtr>& constant_data = {},
UnaryOperation&& func = sh_infer::tr::Cast<TData>()) {
const std::map<size_t, TTensorPtr>& constant_data = {},
UnaryOperation&& func = ov::util::Cast<TData>()) {
if (constant_data.count(idx)) {
return std::unique_ptr<TRes>(
new TRes(get_tensor_data_as<TData, TRes>(*constant_data.at(idx), std::forward<UnaryOperation>(func))));
new TRes(get_tensor_data_as<TData, TRes>(constant_data.at(idx).get(), std::forward<UnaryOperation>(func))));
} else {
const auto& constant = ov::as_type_ptr<ov::opset1::Constant>(op->get_input_node_shared_ptr(idx));
NODE_VALIDATION_CHECK(op, constant != nullptr, "Static shape inference lacks constant data on port ", idx);
@@ -249,6 +256,7 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
* \tparam TShape Shape type which enabled this version (ov::PartialShape)
* \tparam TData Type use to cast input's data.
* \tparam TRes Result type which has got default type as std::vector<TData>.
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)).
*
* \param op Pointer to operator.
@@ -261,15 +269,16 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
template <class TShape,
class TData,
class TRes = std::vector<TData>,
class UnaryOperation,
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::Cast<TData>,
typename std::enable_if<std::is_same<TShape, ov::PartialShape>::value>::type* = nullptr>
std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
size_t idx,
const std::map<size_t, HostTensorPtr>& constant_data = {},
UnaryOperation&& func = sh_infer::tr::Cast<TData>()) {
const std::map<size_t, TTensorPtr>& constant_data = {},
UnaryOperation&& func = ov::util::Cast<TData>()) {
if (constant_data.count(idx)) {
return std::unique_ptr<TRes>(
new TRes(get_tensor_data_as<TData, TRes>(*constant_data.at(idx), std::forward<UnaryOperation>(func))));
new TRes(get_tensor_data_as<TData, TRes>(constant_data.at(idx).get(), std::forward<UnaryOperation>(func))));
} else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) {
const auto& et = constant->get_element_type();
const auto& shape = constant->get_shape();
@@ -288,36 +297,37 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
* The input data can be processed by unary operation. By default is validated and casted to shape's dimension type.
*
* \tparam TShape
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TDimValue &a)).
*
* \param op Pointer to operator.
* \param idx Operator input index.
* \param constant_data Map with constant data. Default empty.
* \param func Unary operation function object to apply in input data.
* Default sh_infer::tr::InTypeRange<TDimValue>.
* Default ov::util::InTypeRange<TDimValue>.
*
* \return Unique pointer to shape created from input data.
*/
template <class TShape,
class TDimValue = typename TShape::value_type::value_type,
class UnaryOperation = sh_infer::tr::InTypeRange<TDimValue>>
std::unique_ptr<TShape> get_input_const_data_as_shape(
const ov::Node* op,
size_t idx,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {},
UnaryOperation&& func = sh_infer::tr::InTypeRange<TDimValue>()) {
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::InTypeRange<TDimValue>>
std::unique_ptr<TShape> get_input_const_data_as_shape(const ov::Node* op,
size_t idx,
const std::map<size_t, TTensorPtr>& constant_data = {},
UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
std::unique_ptr<TShape> shape_ptr;
if (auto d =
get_input_const_data_as<TShape, TDimValue>(op, idx, constant_data, std::forward<UnaryOperation>(func))) {
shape_ptr.reset(new TShape(std::move(*d)));
return std::unique_ptr<TShape>(new TShape(std::move(*d)));
} else {
PartialShape shape;
if (ov::evaluate_as_partial_shape(op->input_value(idx), shape)) {
shape_ptr.reset(new TShape(std::move(shape)));
return std::unique_ptr<TShape>(new TShape(std::move(shape)));
}
}
return shape_ptr;
return {};
}
} // namespace op
} // namespace ov
@@ -328,8 +338,7 @@ inline bool get_data_as(const ov::Node* op,
size_t idx,
std::vector<TData>& data_out,
const std::map<size_t, ov::HostTensorPtr>& constant_data = {}) {
if (auto out =
ov::op::get_input_const_data_as<TShape, TData>(op, idx, constant_data, ov::sh_infer::tr::Cast<TData>())) {
if (auto out = ov::op::get_input_const_data_as<TShape, TData>(op, idx, constant_data, ov::util::Cast<TData>())) {
data_out = std::move(*out);
return true;
} else {
@@ -374,8 +383,8 @@ inline bool get_data_as_shape(size_t idx,
TShape& shape,
const std::map<size_t, ov::HostTensorPtr>& constant_data = {}) {
using TDimValue = typename TShape::value_type::value_type;
shape = std::move(
*ov::op::get_input_const_data_as_shape<TShape>(op, idx, constant_data, ov::sh_infer::tr::Cast<TDimValue>()));
shape =
std::move(*ov::op::get_input_const_data_as_shape<TShape>(op, idx, constant_data, ov::util::Cast<TDimValue>()));
return true;
}

View File

@@ -35,9 +35,7 @@ void op::v0::Tile::validate_and_infer_types() {
"Tile repeats must have any integer element type, but has ",
repeats_et);
const auto input_shapes = get_node_input_partial_shapes(*this);
auto output_shapes = std::vector<PartialShape>(1, ov::PartialShape{});
shape_infer(this, input_shapes, output_shapes);
auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
set_output_type(0, get_input_element_type(0), output_shapes[0]);
set_input_is_relevant_to_shape(0);
@@ -57,10 +55,11 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens
auto repeats_val = read_index_vector(axis);
const auto repeats_rank = repeats_val.size();
std::vector<ov::PartialShape> output_shapes = {ov::PartialShape{}};
std::vector<ov::PartialShape> input_shapes = {data->get_shape(), axis->get_shape()};
shape_infer(this, input_shapes, output_shapes, {{1, axis}});
const auto& output_shape = output_shapes[0].to_shape();
auto axis_tensor = Tensor(axis->get_element_type(), axis->get_shape(), axis->get_data_ptr());
auto const_map = std::map<size_t, std::reference_wrapper<const Tensor>>{{1, axis_tensor}};
const auto input_shapes = std::vector<ov::PartialShape>{data->get_shape(), axis->get_shape()};
const auto& output_shape = shape_infer(this, input_shapes, const_map).front().to_shape();
if (!output->get_is_allocated()) {
output->set_shape(output_shape);
}
@@ -75,9 +74,28 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens
return true;
}
bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
bool op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const {
OV_OP_SCOPE(v0_Tile_evaluate);
return evaluate_tile(outputs, inputs);
const auto& data = input_values[0];
const auto& axis = input_values[1];
auto& output = output_values[0];
auto repeats_val = get_tensor_data_as<int64_t>(axis, ov::util::Cast<int64_t>());
const auto repeats_rank = repeats_val.size();
std::vector<ov::PartialShape> input_shapes = {data.get_shape(), axis.get_shape()};
auto const_map = std::map<size_t, std::reference_wrapper<const Tensor>>{{1, axis}};
const auto& output_shape = shape_infer(this, input_shapes, const_map).front().to_shape();
output.set_shape(output_shape);
repeats_val.insert(repeats_val.begin(), output_shape.size() - repeats_rank, 1);
ngraph::runtime::reference::tile(static_cast<const char*>(data.data()),
static_cast<char*>(output.data()),
data.get_shape(),
output_shape,
data.get_element_type().size(),
repeats_val);
return true;
}
bool op::v0::Tile::has_evaluate() const {
@@ -85,6 +103,13 @@ bool op::v0::Tile::has_evaluate() const {
return true;
}
bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
// Duplicate HostTensor-based overload of the ov::Tensor evaluate. It is kept because the
// template plugin and the shape inference utilities are not yet ready for ov::Tensor;
// once they are, this function can be removed.
OV_OP_SCOPE(v0_Tile_evaluate);
return evaluate_tile(outputs, inputs);
}
bool op::v0::Tile::evaluate_lower(const HostTensorVector& output_values) const {
OV_OP_SCOPE(v0_Tile_evaluate_lower);

View File

@@ -1348,43 +1348,46 @@ bool ov::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& psh
}
bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) {
const auto& input_values = node->input_values();
const auto& inputs_count = node->get_input_size();
if (inputs_count > 0) {
const auto& labels = node->get_input_tensor(0).get_value_label();
if (!has_no_labels(labels)) {
TensorVector inputs;
inputs.reserve(inputs_count);
HostTensorVector input_tensors(input_values.size());
for (size_t i = 0; i < input_values.size(); ++i) {
const auto& input = input_values[i];
if (i != 0) {
if (input.get_tensor().has_and_set_bound())
input_tensors[i] = input.get_tensor().get_lower_value();
else
return false;
} else {
const auto& input_labels = input.get_tensor().get_value_label();
if (has_no_labels(input_labels)) {
return false;
inputs.emplace_back(element::from<label_t>(), node->get_input_shape(0));
std::copy(labels.begin(), labels.end(), inputs.back().data<label_t>());
for (size_t i = 1; i < inputs_count; ++i) {
if (node->get_input_tensor(i).has_and_set_bound()) {
const auto& et = node->get_input_element_type(i);
const auto& shape = node->get_input_shape(i);
inputs.emplace_back(et, shape, node->get_input_tensor(i).get_lower_value()->get_data_ptr());
} else {
return false;
}
}
auto labels_constant = op::v0::Constant::create(ov::element::u64, input.get_shape(), input_labels);
auto idxs_htp = std::make_shared<HostTensor>(labels_constant);
input_tensors[i] = idxs_htp;
const auto& outputs_count = node->get_output_size();
TensorVector outputs;
outputs.reserve(outputs_count);
for (size_t i = 0; i < outputs_count; ++i) {
const auto& partial_shape = node->get_output_partial_shape(i);
// Set shape for static or Shape{0} for dynamic to postpone memory allocation
auto shape = partial_shape.is_static() ? partial_shape.to_shape() : Shape{0};
outputs.emplace_back(element::from<label_t>(), shape);
}
if (node->evaluate(outputs, inputs)) {
std::transform(outputs.cbegin(), outputs.cend(), output_labels.begin(), [](const Tensor& t) {
// Return empty label tensor if input tensor not valid (can have Shape{0})
return t ? TensorLabel(t.data<label_t>(), t.data<label_t>() + t.get_size()) : TensorLabel();
});
return true;
}
}
}
HostTensorVector output_tensors;
output_tensors.reserve(node->get_output_size());
for (size_t i = 0; i < node->get_output_size(); ++i) {
output_tensors.push_back(std::make_shared<HostTensor>(element::u64, node->get_output_partial_shape(i)));
}
if (node->evaluate(output_tensors, input_tensors)) {
std::transform(output_tensors.cbegin(),
output_tensors.cend(),
output_labels.begin(),
[](const HostTensorPtr& tensor) {
return std::make_shared<op::v0::Constant>(tensor)->cast_vector<label_t>();
});
return true;
}
return false;
}