[core]Migrate Tile operator to new API (#20255)

* Migrate Tile to new API

* Remove visit_attributes as is same as base class
This commit is contained in:
Pawel Raasz 2023-10-12 12:51:04 +02:00 committed by GitHub
parent 23acd5a351
commit 075333e94d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 72 additions and 80 deletions

View File

@ -23,7 +23,6 @@ public:
/// \param repeats The node producing the per-dimension replication factor
Tile(const Output<Node>& data, const Output<Node>& repeats);
bool visit_attributes(AttributeVisitor& visitor) override;
void validate_and_infer_types() override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
@ -31,7 +30,7 @@ public:
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool has_evaluate() const override;
bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override;
bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
};
} // namespace v0

View File

@ -5,65 +5,56 @@
#include "openvino/reference/tile.hpp"
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
using namespace ov;
namespace {
/// \brief For each axis calculates the product of inner axes
/// If dims has shape (2, 3, 4) then for 2 (first axis) the inner axes would be (3, 4)
/// and for 3 (second axis) it would be (4)
/// If dims has shape(2, 3, 4) then the output vector would be (3 * 4, 4, 1)
/// The outermost axis is not used. For innermost axis it is always 1.
/// \param[in] dims Shape of the output
///
/// \return Vector containing calculated values for each axis.
std::vector<int64_t> create_pitches(const Shape& dims) {
std::vector<int64_t> pitch;
pitch.resize(dims.size() - 1);
std::partial_sum(dims.rbegin(), dims.rend() - 1, pitch.rbegin(), std::multiplies<int64_t>());
pitch.push_back(1);
return pitch;
}
} // namespace
void reference::tile(const char* arg,
char* out,
const Shape& in_shape,
const Shape& out_shape,
const size_t elem_size,
const std::vector<int64_t>& repeats) {
Shape in_shape_expanded(in_shape);
in_shape_expanded.insert(in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1);
size_t block_size = 0;
int64_t num_repeats = 0;
const int input_rank = static_cast<int>(in_shape_expanded.size());
const int64_t last_dim = in_shape_expanded[input_rank - 1];
const std::vector<int64_t> pitches = create_pitches(out_shape);
const char* copy = nullptr;
std::vector<size_t> indices(in_shape_expanded.size() - 1, 0);
size_t axis = indices.size();
namespace ov {
namespace reference {
/**
* @brief Reference implementation of Tile operator
*
* @param arg Pointer to input data.
* @param out Pointer to output data.
* @param in_shape Input data shape.
* @param out_shape Output data shape.
 * @param elem_size Single data element size in bytes.
* @param repeats Vector with repeats values for axes (same rank as out_shape).
*/
void tile(const char* arg,
char* out,
const Shape& in_shape,
const Shape& out_shape,
const size_t elem_size,
const std::vector<int64_t>& repeats) {
if (std::all_of(repeats.begin(), repeats.end(), [](int64_t repeat) {
return repeat == 0;
})) {
return;
}
decltype(arg) copy_from;
typename std::decay<decltype(*in_shape.begin())>::type block_size;
typename std::decay<decltype(*repeats.begin())>::type num_repeats;
auto in_shape_expanded = in_shape;
in_shape_expanded.insert(in_shape_expanded.begin(), out_shape.size() - in_shape.size(), 1);
const auto last_dim = in_shape_expanded.back();
const auto pitches = row_major_strides(out_shape);
std::vector<size_t> indices(in_shape_expanded.size() - 1, 0);
auto axis = indices.size();
// Copy and repeat data for innermost axis as many times as described in the repeats parameter
while (axis <= indices.size()) {
block_size = last_dim * elem_size;
memcpy(out, arg, block_size);
std::memcpy(out, arg, block_size);
out += block_size;
arg += block_size;
copy = out - block_size;
num_repeats = repeats[input_rank - 1] - 1;
copy_from = out - block_size;
num_repeats = repeats.back() - 1;
for (int64_t i = 0; i < num_repeats; ++i) {
memcpy(out, copy, block_size);
std::memcpy(out, copy_from, block_size);
out += block_size;
}
@ -75,14 +66,16 @@ void reference::tile(const char* arg,
}
indices[axis] = 0;
ptrdiff_t pitch = pitches[axis] * in_shape_expanded[axis];
auto pitch = pitches[axis] * in_shape_expanded[axis];
block_size = pitch * elem_size;
copy = out - block_size;
copy_from = out - block_size;
num_repeats = repeats[axis] - 1;
for (int64_t i = 0; i < num_repeats; i++) {
memcpy(out, copy, block_size);
for (int64_t i = 0; i < num_repeats; ++i) {
std::memcpy(out, copy_from, block_size);
out += block_size;
}
}
}
}
} // namespace reference
} // namespace ov

View File

@ -4,24 +4,22 @@
#include "openvino/op/tile.hpp"
#include <tile_shape_inference.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
#include "openvino/reference/tile.hpp"
#include "tile_shape_inference.hpp"
ov::op::v0::Tile::Tile(const Output<Node>& data, const Output<Node>& repeats) : Op({data, repeats}) {
namespace ov {
namespace op {
namespace v0 {
Tile::Tile(const Output<Node>& data, const Output<Node>& repeats) : Op({data, repeats}) {
ov::mark_as_precision_sensitive(input(1));
constructor_validate_and_infer_types();
}
bool ov::op::v0::Tile::visit_attributes(ov::AttributeVisitor& visitor) {
OV_OP_SCOPE(v0_Tile_visit_attributes);
return true;
}
void ov::op::v0::Tile::validate_and_infer_types() {
void Tile::validate_and_infer_types() {
OV_OP_SCOPE(v0_Tile_validate_and_infer_types);
// Repeats should have integer data type. For now we only allow i64
@ -30,7 +28,6 @@ void ov::op::v0::Tile::validate_and_infer_types() {
repeats_et.is_integral(),
"Tile repeats must have any integer element type, but has ",
repeats_et);
OPENVINO_SUPPRESS_DEPRECATED_START
auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
OPENVINO_SUPPRESS_DEPRECATED_END
@ -40,53 +37,53 @@ void ov::op::v0::Tile::validate_and_infer_types() {
set_input_is_relevant_to_shape(1);
}
std::shared_ptr<ov::Node> ov::op::v0::Tile::clone_with_new_inputs(const OutputVector& new_args) const {
std::shared_ptr<Node> Tile::clone_with_new_inputs(const OutputVector& new_args) const {
OV_OP_SCOPE(v0_Tile_clone_with_new_inputs);
check_new_args_count(this, new_args);
return std::make_shared<Tile>(new_args.at(0), new_args.at(1));
}
bool ov::op::v0::Tile::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const {
bool Tile::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
OV_OP_SCOPE(v0_Tile_evaluate);
const auto& data = input_values[0];
const auto& axis = input_values[1];
auto& output = output_values[0];
auto repeats_val = get_tensor_data_as<int64_t>(axis, ov::util::Cast<int64_t>());
const auto repeats_rank = repeats_val.size();
OPENVINO_ASSERT(outputs.size() == 1);
OPENVINO_ASSERT(inputs.size() == 2);
std::vector<ov::PartialShape> input_shapes = {data.get_shape(), axis.get_shape()};
const auto& d = inputs[0];
const auto& r = inputs[1];
auto repeats = get_tensor_data_as<int64_t>(r);
const auto& output_shape = shape_infer(this, input_shapes, make_tensor_accessor(input_values)).front().to_shape();
output.set_shape(output_shape);
repeats_val.insert(repeats_val.begin(), output_shape.size() - repeats_rank, 1);
ov::reference::tile(static_cast<const char*>(data.data()),
static_cast<char*>(output.data()),
data.get_shape(),
output_shape,
data.get_element_type().size(),
repeats_val);
const std::vector<ov::PartialShape> input_shapes{d.get_shape(), r.get_shape()};
const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape();
outputs[0].set_shape(output_shape);
repeats.insert(repeats.begin(), output_shape.size() - repeats.size(), 1);
reference::tile(static_cast<const char*>(d.data()),
static_cast<char*>(outputs[0].data()),
d.get_shape(),
output_shape,
d.get_element_type().size(),
repeats);
return true;
}
bool ov::op::v0::Tile::has_evaluate() const {
bool Tile::has_evaluate() const {
OV_OP_SCOPE(v0_Tile_has_evaluate);
return true;
}
bool ov::op::v0::Tile::evaluate_lower(ov::TensorVector& output_values) const {
bool Tile::evaluate_lower(TensorVector& output_values) const {
OV_OP_SCOPE(v0_Tile_evaluate_lower);
return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool ov::op::v0::Tile::evaluate_upper(ov::TensorVector& output_values) const {
bool Tile::evaluate_upper(TensorVector& output_values) const {
OV_OP_SCOPE(v0_Tile_evaluate_upper);
return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}
bool ov::op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const {
bool Tile::evaluate_label(TensorLabelVector& output_labels) const {
OV_OP_SCOPE(v0_Tile_evaluate_label);
OPENVINO_ASSERT(output_labels.size() == 1);
@ -94,3 +91,6 @@ bool ov::op::v0::Tile::evaluate_label(TensorLabelVector& output_labels) const {
return get_input_tensor(1).has_and_set_bound() && default_label_evaluator(this, output_labels);
OPENVINO_SUPPRESS_DEPRECATED_END
}
} // namespace v0
} // namespace op
} // namespace ov