Introduce data accessor function for infer in IStaticShapeInfer (#15574)

* Tensor accessor for shape inference
- as functor for getting data from tensor vector or map.
- as lambda in GPU plugin on tile op

* Make tensor data adapter pure virtual
- function accessor to data returns pointer to interface

* Refactor tensor data accessor and adapter

* Extract memory adapter make it GPU graph internal
- can't be part of GPU runtime, as the memory core dev API is not visible there

* Expand IStaticShapeInfer by port map
- update factory map for new infer interface with port map information
- add bit util to generate bit mask use it in PortMask

* Pass tensor accessor as reference not fun object
- Add cldnn data adapter and accessor
- Reduce dynamic allocations in data accessors

* Fix compilation issues

* Use ov::Tensor for data accessor
- remove data adapters as they are not required

* Update comments

* Fix build issues

* Fix tile shape infer test

* Add empty null tensor accessor as specialization

* Apply style formatting

* Move data accessor from dev API to shape inference

* Fix linking issues
This commit is contained in:
Pawel Raasz
2023-05-11 11:30:30 +02:00
committed by GitHub
parent 30395c3e96
commit c13423e2ca
12 changed files with 420 additions and 104 deletions

View File

@@ -39,7 +39,7 @@ namespace v1 {
template <class TShape>
std::vector<TShape> shape_infer(const Reverse* op,
const std::vector<TShape>& input_shapes,
const std::map<size_t, std::reference_wrapper<const ov::Tensor>>& constant_data = {}) {
const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_shape = input_shapes[0];
@@ -69,7 +69,7 @@ std::vector<TShape> shape_infer(const Reverse* op,
"AxisSet::value_type != ClipNegative::value_type");
if (const auto axes =
get_input_const_data_as<TShape, TAxis, AxisSet>(op, 1, constant_data, util::ClipNegative())) {
get_input_const_data_as<TShape, TAxis, AxisSet>(op, 1, tensor_accessor, util::ClipNegative())) {
NODE_VALIDATION_CHECK(op,
all_of(axes->begin(), axes->end(), cmp::Less<TAxis>(data_rank.get_length())),
"Some of the provided axes (",

View File

@@ -0,0 +1,92 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/node.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {
/** @brief Interface for accessing operator input data as tensors during shape inference. */
class ITensorAccessor {
public:
/**
* @brief Get tensor at port.
*
* @param port Number of data port (operator input) to get tensor for.
* @return Tensor with data at the port, or an empty tensor when data is not available.
*/
virtual Tensor operator()(size_t port) const = 0;
virtual ~ITensorAccessor() = default;
};
/**
* @brief Tensor data accessor functor.
*
* Returns the ov::Tensor found in the tensors container for a given port.
* This accessor does not take ownership of the tensors container (it stores a raw pointer,
* so the container must outlive the accessor).
* Supports the following containers:
* - ov::TensorVector
* - ov::HostTensorVector
* - std::map<size_t, ov::HostTensorPtr>
*
* @tparam TContainer Type of tensor container.
*/
template <class TContainer>
class TensorAccessor : public ITensorAccessor {
public:
/**
* @brief Construct a new Tensor Accessor object for a tensors container.
*
* NOTE(review): single-argument constructor is implicit — consider marking it explicit
* unless implicit container-to-accessor conversion is intended.
*
* @param tensors Pointer to container with tensors.
*/
TensorAccessor(const TContainer* tensors) : m_tensors{tensors} {}
/**
* @brief Get tensor for given port number.
*
* @param port Port number to get data for.
*
* @return Tensor with data, or an empty tensor if data is not found.
*/
Tensor operator()(size_t port) const override;
private:
const TContainer* m_tensors; //!< Non-owning pointer to the tensor container.
};
// Specializations are defined in the corresponding source file (one per supported container).
template <>
Tensor TensorAccessor<TensorVector>::operator()(size_t port) const;
template <>
Tensor TensorAccessor<HostTensorVector>::operator()(size_t port) const;
template <>
Tensor TensorAccessor<std::map<size_t, HostTensorPtr>>::operator()(size_t port) const;
template <>
Tensor TensorAccessor<void>::operator()(size_t port) const;
/**
* @brief Makes a TensorAccessor for a specific tensor container.
*
* @tparam TContainer Type of tensor container, @see TensorAccessor for supported types.
*
* @param c Container of tensors (must outlive the returned accessor).
*
* @return TensorAccessor for the specific container type.
*/
template <class TContainer>
auto make_tensor_accessor(const TContainer& c) -> TensorAccessor<TContainer> {
return TensorAccessor<TContainer>(&c);
}
/**
* @brief Makes an empty TensorAccessor which returns an empty tensor for any port number.
*
* @return TensorAccessor which always returns an empty tensor.
*/
auto make_tensor_accessor() -> TensorAccessor<void>;
} // namespace ov

View File

@@ -23,7 +23,7 @@ struct NegativeToZero {
template <class T>
std::vector<T> shape_infer(const Tile* op,
const std::vector<T>& input_shapes,
const std::map<size_t, std::reference_wrapper<const ov::Tensor>>& constant_data = {}) {
const ITensorAccessor& tensor_accessor = make_tensor_accessor()) {
using TDim = typename T::value_type;
using TDimValue = typename TDim::value_type;
@@ -39,7 +39,7 @@ std::vector<T> shape_infer(const Tile* op,
// Get repeats and pre process values
constexpr auto negative_repeats_to_zero = NegativeToZero<TDimValue>();
auto repeats = get_input_const_data_as_shape<T>(op, 1, constant_data, negative_repeats_to_zero);
auto repeats = get_input_const_data_as_shape<T>(op, 1, tensor_accessor, negative_repeats_to_zero);
const auto& arg_rank = arg_shape.rank();
if (arg_rank.is_static() && repeats) {

View File

@@ -10,6 +10,7 @@
#include "bound_evaluation_util.hpp"
#include "shape_infer_type_utils.hpp"
#include "tensor_data_accessor.hpp"
template <class OpType, class T>
void copy_shape_infer(const OpType* op, const std::vector<T>& input_shapes, std::vector<T>& output_shapes) {
@@ -214,29 +215,26 @@ namespace op {
* \tparam TShape Shape type which enabled this version (not ov::PartialShape)
* \tparam TData Type use to cast input's data.
* \tparam TRes Result type which has got default type as std::vector<TData>.
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)).
*
* \param op Pointer to operator.
* \param idx Operator's input number.
* \param constant_data Map with constant. Default empty.
* \param func Unary operation function object.
* \param op Pointer to operator.
* \param idx Operator's input number.
* \param tensor_accessor Tensor accessor object.
* \param func Unary operation function object.
*
* \return Pointer to constant data or nullptr if input has no constant data.
*/
template <class TShape,
class TData,
class TRes = std::vector<TData>,
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::Cast<TData>,
typename std::enable_if<!std::is_same<TShape, ov::PartialShape>::value>::type* = nullptr>
std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
size_t idx,
const std::map<size_t, TTensorPtr>& constant_data = {},
const ITensorAccessor& tensor_accessor,
UnaryOperation&& func = ov::util::Cast<TData>()) {
if (constant_data.count(idx)) {
return std::unique_ptr<TRes>(
new TRes(get_tensor_data_as<TData, TRes>(constant_data.at(idx).get(), std::forward<UnaryOperation>(func))));
if (auto t = tensor_accessor(idx)) {
return std::unique_ptr<TRes>(new TRes(get_tensor_data_as<TData, TRes>(t, std::forward<UnaryOperation>(func))));
} else {
const auto& constant = ov::as_type_ptr<ov::opset1::Constant>(op->get_input_node_shared_ptr(idx));
NODE_VALIDATION_CHECK(op, constant != nullptr, "Static shape inference lacks constant data on port ", idx);
@@ -258,31 +256,29 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
* \tparam TShape Shape type which enabled this version (ov::PartialShape)
* \tparam TData Type use to cast input's data.
* \tparam TRes Result type which has got default type as std::vector<TData>.
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)).
*
* \param op Pointer to operator.
* \param idx Operator's input number.
* \param constant_data Map with constant. Default empty.
* \param func Unary operation function object.
* \param op Pointer to operator.
* \param idx Operator's input number.
* \param tensor_accessor Tensor accessor object.
* \param func Unary operation function object.
*
* \return Pointer to constant data or nullptr if input has no constant data.
*/
template <class TShape,
class TData,
class TRes = std::vector<TData>,
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::Cast<TData>,
typename std::enable_if<std::is_same<TShape, ov::PartialShape>::value>::type* = nullptr>
std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
size_t idx,
const std::map<size_t, TTensorPtr>& constant_data = {},
const ITensorAccessor& tensor_accessor,
UnaryOperation&& func = ov::util::Cast<TData>()) {
if (constant_data.count(idx)) {
return std::unique_ptr<TRes>(
new TRes(get_tensor_data_as<TData, TRes>(constant_data.at(idx).get(), std::forward<UnaryOperation>(func))));
if (auto t = tensor_accessor(idx)) {
return std::unique_ptr<TRes>(new TRes(get_tensor_data_as<TData, TRes>(t, std::forward<UnaryOperation>(func))));
OPENVINO_SUPPRESS_DEPRECATED_START
} else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) {
} else if (const auto& constant =
(idx < op->get_input_size()) ? ov::get_constant_from_source(op->input_value(idx)) : nullptr) {
OPENVINO_SUPPRESS_DEPRECATED_END
const auto& et = constant->get_element_type();
const auto& shape = constant->get_shape();
@@ -295,13 +291,79 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
}
}
/**
* \brief Get the input const data as shape object.
*
* The input data can be processed by unary operation. By default is validated and casted to shape's dimension type.
*
* \tparam TShape Shape type.
* \tparam TDimValue Dimension value type.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TDimValue &a)).
*
* \param op Pointer to operator.
* \param idx Operator input index.
* \param tensor_accessor Tensor accessor object.
* \param func Unary operation function object to apply in input data.
* Default ov::utils::InTypeRange<TDimValue>.
*
* \return Unique pointer to shape created from input data.
*/
template <class TShape,
class TDimValue = typename TShape::value_type::value_type,
class UnaryOperation = ov::util::InTypeRange<TDimValue>>
std::unique_ptr<TShape> get_input_const_data_as_shape(const ov::Node* op,
size_t idx,
const ITensorAccessor& tensor_accessor,
UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
// First try to build the shape directly from constant input data (accessor or Constant node).
if (auto s = get_input_const_data_as<TShape, TDimValue, TShape>(op,
idx,
tensor_accessor,
std::forward<UnaryOperation>(func))) {
return s;
} else {
PartialShape shape;
OPENVINO_SUPPRESS_DEPRECATED_START
// No constant data: fall back to evaluating the input's value bounds as a partial shape.
// The idx bound check guards against ports that do not exist on the operator.
if ((idx < op->get_input_size()) && ov::evaluate_as_partial_shape(op->input_value(idx), shape)) {
OPENVINO_SUPPRESS_DEPRECATED_END
return std::unique_ptr<TShape>(new TShape(std::move(shape)));
}
}
// Shape cannot be deduced from the input.
return {};
}
/**
* \brief Get the operator's input const as pointer to vector of specified type.
*
* The behaviour depends on shape type. The default output type is std::vector<TData> can be replace by other type
* which if is possible to construct it from constant data vector.
*
* \tparam TShape Shape type which enabled this version (not ov::PartialShape)
* \tparam TData Type use to cast input's data.
* \tparam TRes Result type which has got default type as std::vector<TData>.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)).
*
* \param op Pointer to operator.
* \param idx Operator's input number.
* \param constant_data Map with constant. Default empty.
* \param func Unary operation function object.
*
* \return Pointer to constant data or nullptr if input has no constant data.
*/
template <class TShape, class TData, class TRes = std::vector<TData>, class UnaryOperation = ov::util::Cast<TData>>
std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
size_t idx,
const std::map<size_t, HostTensorPtr>& constant_data = {},
UnaryOperation&& func = ov::util::Cast<TData>()) {
// Backward-compatible overload: wraps the legacy HostTensor map into a tensor accessor
// and forwards to the accessor-based implementation.
const auto tensor_accessor = make_tensor_accessor(constant_data);
return get_input_const_data_as<TShape, TData, TRes>(op, idx, tensor_accessor, std::forward<UnaryOperation>(func));
}
/**
* \brief Get the input const data as shape object.
*
* The input data can be processed by unary operation. By default is validated and casted to shape's dimension type.
*
* \tparam TShape
* \tparam TTensorPtr Type of tensor pointer or reference_wrapper. Default HostTensorPtr.
* \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TDimValue &a)).
*
* \param op Pointer to operator.
@@ -314,26 +376,16 @@ std::unique_ptr<TRes> get_input_const_data_as(const ov::Node* op,
*/
template <class TShape,
class TDimValue = typename TShape::value_type::value_type,
class TTensorPtr = HostTensorPtr,
class UnaryOperation = ov::util::InTypeRange<TDimValue>>
std::unique_ptr<TShape> get_input_const_data_as_shape(const ov::Node* op,
size_t idx,
const std::map<size_t, TTensorPtr>& constant_data = {},
const std::map<size_t, HostTensorPtr>& constant_data = {},
UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
std::unique_ptr<TShape> shape_ptr;
if (auto d =
get_input_const_data_as<TShape, TDimValue>(op, idx, constant_data, std::forward<UnaryOperation>(func))) {
return std::unique_ptr<TShape>(new TShape(std::move(*d)));
} else {
PartialShape shape;
OPENVINO_SUPPRESS_DEPRECATED_START
if (ov::evaluate_as_partial_shape(op->input_value(idx), shape)) {
OPENVINO_SUPPRESS_DEPRECATED_END
return std::unique_ptr<TShape>(new TShape(std::move(shape)));
}
}
return {};
const auto tensor_accessor = make_tensor_accessor(constant_data);
return get_input_const_data_as_shape<TShape, TDimValue>(op,
idx,
tensor_accessor,
std::forward<UnaryOperation>(func));
}
/**

View File

@@ -0,0 +1,48 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "tensor_data_accessor.hpp"
namespace ov {
// Returns the tensor stored at the given index of the tensor vector,
// or an empty tensor when the port is out of range.
template <>
Tensor TensorAccessor<TensorVector>::operator()(size_t port) const {
    const auto in_bounds = (port < m_tensors->size());
    return in_bounds ? (*m_tensors)[port] : make_tensor_accessor()(port);
}
// Wraps the host tensor at the given index into an ov::Tensor view (no data copy),
// or returns an empty tensor when the port is out of range.
template <>
Tensor TensorAccessor<HostTensorVector>::operator()(size_t port) const {
    if (port >= m_tensors->size()) {
        return make_tensor_accessor()(port);
    }
    auto host_tensor = (*m_tensors)[port];
    return {host_tensor->get_element_type(), host_tensor->get_shape(), host_tensor->get_data_ptr()};
}
// Looks up the host tensor mapped to the given port and wraps it into an ov::Tensor view,
// or returns an empty tensor when the port has no entry in the map.
template <>
Tensor TensorAccessor<std::map<size_t, HostTensorPtr>>::operator()(size_t port) const {
    const auto found = m_tensors->find(port);
    if (found == m_tensors->cend()) {
        return make_tensor_accessor()(port);
    }
    const auto& host_tensor = found->second;
    return {host_tensor->get_element_type(), host_tensor->get_shape(), host_tensor->get_data_ptr()};
}
// The void specialization represents "no data": any port yields an empty tensor.
template <>
Tensor TensorAccessor<void>::operator()(size_t) const {
    static const auto empty_tensor = Tensor{};
    return empty_tensor;
}

// Factory for the empty accessor; reuses one shared instance since it holds no state.
auto make_tensor_accessor() -> TensorAccessor<void> {
    static const auto void_accessor = TensorAccessor<void>{nullptr};
    return void_accessor;
}
} // namespace ov

View File

@@ -57,11 +57,8 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens
auto repeats_val = read_index_vector(axis);
const auto repeats_rank = repeats_val.size();
auto axis_tensor = Tensor(axis->get_element_type(), axis->get_shape(), axis->get_data_ptr());
auto const_map = std::map<size_t, std::reference_wrapper<const Tensor>>{{1, axis_tensor}};
const auto input_shapes = std::vector<ov::PartialShape>{data->get_shape(), axis->get_shape()};
const auto& output_shape = shape_infer(this, input_shapes, const_map).front().to_shape();
const auto& output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape();
if (!output->get_is_allocated()) {
output->set_shape(output_shape);
}
@@ -86,8 +83,7 @@ bool op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVec
std::vector<ov::PartialShape> input_shapes = {data.get_shape(), axis.get_shape()};
auto const_map = std::map<size_t, std::reference_wrapper<const Tensor>>{{1, axis}};
const auto& output_shape = shape_infer(this, input_shapes, const_map).front().to_shape();
const auto& output_shape = shape_infer(this, input_shapes, make_tensor_accessor(input_values)).front().to_shape();
output.set_shape(output_shape);
repeats_val.insert(repeats_val.begin(), output_shape.size() - repeats_rank, 1);
ngraph::runtime::reference::tile(static_cast<const char*>(data.data()),

View File

@@ -34,6 +34,7 @@
#include <utils/shape_inference/shape_inference_cpu.hpp>
#include "utils/debug_capabilities.h"
#include "utils/bit_util.hpp"
#include "dnnl_postops_composer.h"
#include "graph_context.h"
@@ -728,13 +729,9 @@ private:
#endif
};
constexpr uint64_t PortMask(int n) {
return static_cast<uint64_t>(1) << n;
}
template <class... T>
constexpr uint64_t PortMask(int n, T... rest) {
return PortMask(rest...) | (1 << n);
constexpr uint64_t PortMask(T... rest) {
return util::bit::mask(rest...);
}
class Node::NodesFactory : public openvino::cc::Factory<Type,

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
namespace ov {
namespace util {
namespace bit {
/**
 * @brief Makes an empty 64-bit mask (no bits set).
 *
 * @return 64-bit mask equal to 0.
 */
template <class = void>
constexpr uint64_t mask() {
    return 0;
}
/**
 * @brief Makes a 64-bit mask with bits set at the given positions.
 *
 * @tparam T    Type of the first bit position.
 * @tparam Tail Types of the remaining bit positions.
 *
 * @param pos       Bit position to set (must be less than 64).
 * @param tail_bits Remaining bit positions to set.
 * @return 64-bit mask with the requested bits set.
 */
template <class T, class... Tail>
constexpr uint64_t mask(T pos, Tail... tail_bits) {
    // Set the current bit, then recurse over the remaining positions.
    return (static_cast<uint64_t>(1) << pos) | mask(tail_bits...);
}
}  // namespace bit
}  // namespace util
}  // namespace ov

View File

@@ -90,6 +90,7 @@
#include "transpose_shape_inference.hpp"
#include "unsqueeze_shape_inference.hpp"
#include "utils.hpp"
#include "utils/bit_util.hpp"
#include "variadic_split_shape_inference.hpp"
namespace ov {
@@ -305,9 +306,10 @@ protected:
/**
* @brief Base shape inference object implementing the IStaticShapeInfer without padding support
*
* @tparam TOp Type of operator.
* @tparam TOp Type of operator.
* @tparam mask Bit Mask of data dependent ports.
*/
template <class TOp>
template <class TOp, uint32_t mask>
class ShapeInferBase : public IStaticShapeInfer {
public:
using iface_type = IStaticShapeInfer;
@@ -325,20 +327,12 @@ public:
IShapeInferCommon::Result
infer(const std::vector<StaticShape>& input_shapes, const std::map<size_t, HostTensorPtr>& constant_data) override {
// For backward compatibility, create ov tensors and run shape inference.
TensorVector tensors;
tensors.reserve(constant_data.size());
std::map<size_t, std::reference_wrapper<const Tensor>> const_tensor_map;
for (const auto& c : constant_data) {
tensors.emplace_back(c.second->get_element_type(), c.second->get_shape(), c.second->get_data_ptr());
const_tensor_map.emplace(c.first, tensors.back());
}
return infer(input_shapes, const_tensor_map);
return infer(input_shapes, make_tensor_accessor(constant_data));
}
IShapeInferCommon::Result
infer(const std::vector<StaticShape>& input_shapes, const std::map<size_t, std::reference_wrapper<const Tensor>>& constant_data) override {
auto result = shape_infer(static_cast<TOp*>(m_node.get()), input_shapes, constant_data);
IShapeInferCommon::Result infer(const std::vector<StaticShape>& input_shapes,
const ov::ITensorAccessor& tensor_accessor) override {
auto result = shape_infer(static_cast<TOp*>(m_node.get()), input_shapes, tensor_accessor);
return {std::move(result), ShapeInferStatus::success};
}
@@ -354,9 +348,13 @@ public:
return m_input_ranks;
}
port_mask_t get_port_mask() const override {
return mask;
}
protected:
std::vector<int64_t> m_input_ranks;
std::shared_ptr<Node> m_node;
std::shared_ptr<ov::Node> m_node;
};
/**
@@ -403,18 +401,25 @@ std::shared_ptr<typename TShapeInfer<TOp>::iface_type> make_infer(Args&&... args
return std::make_shared<TShapeInfer<TOp>>(std::forward<Args>(args)...);
}
template <template <class, IStaticShapeInfer::port_mask_t> class TShapeInfer,
class TOp,
IStaticShapeInfer::port_mask_t mask>
std::shared_ptr<typename TShapeInfer<TOp, mask>::iface_type> make_shape_infer(std::shared_ptr<ov::Node> node) {
return std::make_shared<TShapeInfer<TOp, mask>>(std::move(node));
}
template <template <class> class TShapeInfer, class TOp>
std::shared_ptr<typename TShapeInfer<TOp>::iface_type> make_shape_infer(std::shared_ptr<Node> node) {
std::shared_ptr<typename TShapeInfer<TOp>::iface_type> make_shape_infer(std::shared_ptr<ov::Node> node) {
return make_infer<TShapeInfer, TOp>(std::move(node));
}
template <class TShapeInfer>
std::shared_ptr<typename TShapeInfer::iface_type> make_shape_infer(std::shared_ptr<Node> node) {
std::shared_ptr<typename TShapeInfer::iface_type> make_shape_infer(std::shared_ptr<ov::Node> node) {
return std::make_shared<TShapeInfer>(std::move(node));
}
template <template <class, bool> class TConvInfer, class TOp, bool flag>
std::shared_ptr<typename TConvInfer<TOp, flag>::iface_type> make_shape_infer(std::shared_ptr<Node> node) {
std::shared_ptr<typename TConvInfer<TOp, flag>::iface_type> make_shape_infer(std::shared_ptr<ov::Node> node) {
return std::make_shared<TConvInfer<TOp, flag>>(std::move(node));
}
@@ -428,11 +433,12 @@ using namespace ov::opset10;
#define _OV_OP_SHAPE_INFER_VA_REG(OP, ...) \
{ OP::get_type_info_static(), make_shape_infer<__VA_ARGS__> }
#define _OV_OP_SHAPE_INFER_REG(OP, SHAPE_INFER) _OV_OP_SHAPE_INFER_VA_REG(OP, SHAPE_INFER, OP)
#define _OV_OP_SHAPE_INFER_MASK_REG(OP, SHAPE_INFER, MASK) _OV_OP_SHAPE_INFER_VA_REG(OP, SHAPE_INFER, OP, MASK)
#define _OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(OP, SHAPE_INFER) _OV_OP_SHAPE_INFER_VA_REG(OP, SHAPE_INFER)
// Helper types for IShapeInferCommon makers map.
using IShapeInferCommonFactory =
ShapeInferFactory<ShapeInferKey, std::shared_ptr<IShapeInferCommon>, std::shared_ptr<Node>>;
ShapeInferFactory<ShapeInferKey, std::shared_ptr<IShapeInferCommon>, std::shared_ptr<ov::Node>>;
// Initialization map for operators supporting IShapeInferCommon objects.
// First group in map is 'default' opset defined by alias above.
@@ -570,7 +576,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
// Helper types for IStaticShapeInfer makers.
using IStaticShapeInferFactory =
ShapeInferFactory<ShapeInferKey, std::shared_ptr<IStaticShapeInfer>, std::shared_ptr<Node>>;
ShapeInferFactory<ShapeInferKey, std::shared_ptr<IStaticShapeInfer>, std::shared_ptr<ov::Node>>;
// Initialization map for operators supporting IStaticShapeInfer objects.
// First group in map is 'default' opset defined by alias above.
@@ -578,17 +584,18 @@ using IStaticShapeInferFactory =
template <>
const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{
// Default opset
_OV_OP_SHAPE_INFER_REG(Tile, ShapeInferBase),
_OV_OP_SHAPE_INFER_MASK_REG(Tile, ShapeInferBase, util::bit::mask(1)),
// Operators shape inferences for specific opset version should be specified below
// opset1
_OV_OP_SHAPE_INFER_REG(opset1::Reverse, ShapeInferBase),
_OV_OP_SHAPE_INFER_MASK_REG(opset1::Reverse, ShapeInferBase, util::bit::mask(1)),
};
#undef _OV_OP_NON_TEMPLATE_SHAPE_INFER_REG
#undef _OV_OP_SHAPE_INFER_MASK_REG
#undef _OV_OP_SHAPE_INFER_REG
#undef _OV_OP_SHAPE_INFER_VA_REG
template <>
std::shared_ptr<IShapeInferCommon> make_shape_inference<IShapeInferCommon>(std::shared_ptr<Node> op) {
std::shared_ptr<IShapeInferCommon> make_shape_inference<IShapeInferCommon>(std::shared_ptr<ov::Node> op) {
if (auto shape_infer = IShapeInferCommonFactory::make(op->get_type_info(), op)) {
return shape_infer;
} else if (auto shape_infer = make_shape_inference<IStaticShapeInfer>(op)) {

View File

@@ -8,8 +8,9 @@
#include <openvino/core/core.hpp>
#include <openvino/core/node.hpp>
#include "static_shape.hpp"
#include "shape_inference_status.hpp"
#include "static_shape.hpp"
#include "tensor_data_accessor.hpp"
namespace ov {
namespace intel_cpu {
@@ -23,7 +24,7 @@ public:
public:
virtual Result infer(const std::vector<StaticShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data) = 0;
const std::map<size_t, HostTensorPtr>& constant_data) = 0;
// infer may generate padding as by-product, these APIs is designed to retrieve them back
virtual const ov::CoordinateDiff& get_pads_begin() = 0;
@@ -34,11 +35,19 @@ public:
class IStaticShapeInfer : public IShapeInferCommon {
public:
using port_mask_t = uint32_t; //!< Operator's port mask to indicate input data dependency
using IShapeInferCommon::infer;
virtual Result infer(
const std::vector<StaticShape>& input_shapes,
const std::map<size_t, std::reference_wrapper<const Tensor>>& constant_data) = 0;
virtual Result infer(const std::vector<StaticShape>& input_shapes, const ov::ITensorAccessor& tensor_accessor) = 0;
/**
* @brief Some shape inference implementation may require input data stored inside the input tensors. To define
* which inputs data are required, the port mask is used. Each set bit corresponds to the specific input port
* number.
*
* @return port_mask_t a bit mask where each bit corresponds to an input port number.
*/
virtual port_mask_t get_port_mask() const = 0;
};
template <class TShapeInferIface = IShapeInferCommon>

View File

@@ -0,0 +1,84 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "intel_gpu/runtime/memory.hpp"
#include "tensor_data_accessor.hpp"
namespace cldnn {
/**
* @brief CLDNN memory accessor implementing ov::ITensorAccessor to get data as tensor from a CLDNN memory container.
*
* NOTE(review): copying this accessor would let two instances unlock the same memory
* (double-unlock via m_accessed_data) — consider deleting copy operations; confirm with callers.
*/
struct MemoryAccessor : public ov::ITensorAccessor {
using container_type = std::map<size_t, memory::ptr>; //!< Alias to cldnn memory map.
/**
* @brief Construct a new Memory Accessor without a custom callback.
*
* @param ptrs Pointer to the CLDNN memory map (not owned; must outlive the accessor).
* @param stream CLDNN stream used for memory locks (held by reference; must outlive the accessor).
*/
MemoryAccessor(const container_type* ptrs, const stream& stream)
: m_ptrs{ptrs},
m_stream{stream},
m_clbk{},
m_accessed_data{} {}
/**
* @brief Construct a new Memory Accessor with a custom callback function.
*
* NOTE(review): parameter type returns const ov::Tensor while m_clbk's signature returns
* ov::Tensor — this compiles via conversion, but consider aligning the two types.
*
* @param ptrs Pointer to the CLDNN memory map (not owned; must outlive the accessor).
* @param stream CLDNN stream used for memory locks.
* @param clbk Function object called when data is accessed and not found in CLDNN memories.
*/
MemoryAccessor(const container_type* ptrs, const stream& stream, std::function<const ov::Tensor(size_t)> clbk)
: m_ptrs{ptrs},
m_stream{stream},
m_clbk{std::move(clbk)},
m_accessed_data{} {}
// Releases the lock held on the most recently accessed memory, if any.
~MemoryAccessor() {
unlock_current_data();
}
/**
* @brief Get data from the CLDNN memory container, or via the custom callback if defined.
*
* Data obtained from CLDNN memory stays locked until this accessor is destroyed or new data
* is accessed, so the returned tensor's data pointer is only valid within that window.
*
* @param port Number of operator port to access data for.
* @return Tensor with data, or an empty tensor when not found and no callback provides it.
*/
ov::Tensor operator()(size_t port) const override {
// Release the previously locked memory before acquiring a new lock.
unlock_current_data();
m_accessed_data = nullptr;
const auto t_iter = m_ptrs->find(port);
if (t_iter != m_ptrs->cend()) {
// Lock found memory for reading; the tensor wraps the locked pointer without copying.
m_accessed_data = t_iter->second;
return {data_type_to_element_type(m_accessed_data->get_layout().data_type),
m_accessed_data->get_layout().get_shape(),
m_accessed_data->lock(m_stream, mem_lock_type::read)};
} else if (m_clbk) {
return m_clbk(port);
} else {
// Fall back to the empty accessor (always returns an empty tensor).
return ov::make_tensor_accessor()(port);
}
}
private:
// Unlocks the currently held memory; safe to call when nothing is locked.
void unlock_current_data() const {
if (m_accessed_data) {
m_accessed_data->unlock(m_stream);
}
}
const container_type* m_ptrs; //!< Non-owning pointer to CLDNN memory map with op data.
const stream& m_stream; //!< Current stream used for memory locks.
std::function<ov::Tensor(size_t)> m_clbk; //!< Function object to get data if not in m_ptrs.
mutable memory::ptr m_accessed_data; //!< Currently accessed (locked) memory; mutable as operator() is const.
};
} // namespace cldnn

View File

@@ -6,12 +6,14 @@
#include "tile_shape_inference.hpp"
#include "primitive_type_base.h"
#include "memory_accessor.hpp"
#include "intel_gpu/runtime/memory.hpp"
#include "intel_gpu/runtime/format.hpp"
#include "json_object.h"
#include <string>
namespace cldnn {
GPU_DEFINE_PRIMITIVE_TYPE_ID(tile)
layout tile_inst::calc_output_layout(tile_node const& node, kernel_impl_params const& impl_param) {
@@ -44,36 +46,25 @@ std::vector<layout> tile_inst::calc_output_layouts(tile_node const& /*node*/, co
ShapeType repeats_shape = impl_param.input_layouts.size() == 2 ? impl_param.get_input_layout(1).get<ShapeType>()
: ov::Shape{ desc->repeats.size() };
ov::op::v0::Tile op;
std::vector<ShapeType> output_shapes;
std::vector<ShapeType> input_shapes = {
input0_layout.get<ShapeType>(),
repeats_shape
};
auto& constant_mem = impl_param.memory_deps;
if (desc->input_size() == 2) {
if (constant_mem.count(1)) {
auto repeats_mem = constant_mem.at(1);
cldnn::mem_lock<uint8_t, mem_lock_type::read> repeats_lock(repeats_mem, impl_param.get_stream());
const auto& layout = repeats_mem->get_layout();
const auto repeats_tensor =
ov::Tensor(data_type_to_element_type(layout.data_type), layout.get_shape(), repeats_lock.data());
output_shapes = ov::op::v0::shape_infer(&op, input_shapes, {{1, repeats_tensor}});
} else if (repeats_shape.size() > 0 && repeats_shape[0].is_static()) {
output_shapes = { ov::PartialShape::dynamic(std::max<size_t>(input_shapes[0].size(), repeats_shape[0].get_length())) };
} else {
output_shapes = { ov::PartialShape::dynamic() };
}
} else {
auto repeats_data = desc->repeats;
const auto repeats_tensor =
ov::Tensor(data_type_to_element_type(data_types::i64), repeats_shape.to_shape(), repeats_data.data());
output_shapes = ov::op::v0::shape_infer(&op, input_shapes, {{1, repeats_tensor}});
}
auto repeats = desc->repeats;
const auto data_accessor =
MemoryAccessor(&impl_param.memory_deps, impl_param.prog->get_stream(), [&repeats, &repeats_shape](size_t port) {
return (port == 1 && repeats.data()) ? ov::Tensor(data_type_to_element_type(data_types::i64),
repeats_shape.to_shape(),
repeats.data())
: ov::Tensor();
});
std::vector<ShapeType> output_shapes = ov::op::v0::shape_infer(&op, input_shapes, data_accessor);
format output_format = format::adjust_to_rank(input0_layout.format, output_shapes[0].size());
return { layout{output_shapes[0], output_type, output_format} };
return {layout{output_shapes[0], output_type, output_format}};
}
template std::vector<layout> tile_inst::calc_output_layouts<ov::PartialShape>(tile_node const& node, const kernel_impl_params& impl_param);