[ONNX] Support ONNX importer Slice by ov::opset8::Slice (#7888)

This commit is contained in:
Katarzyna Mitrus
2021-11-17 12:23:28 +01:00
committed by GitHub
parent 9c6cf6171d
commit 6855efd315
7 changed files with 110 additions and 200 deletions

View File

@@ -31,6 +31,11 @@ public:
bool visit_attributes(AttributeVisitor& visitor) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool has_evaluate() const override;
// TODO: Update to use new evaluate with TensorVector
bool evaluate(const HostTensorVector&, const HostTensorVector&) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
std::shared_ptr<ngraph::op::v0::Constant> get_default_const_axes(const Output<Node>& start) const;
PartialShape calculate_output_shape(const std::vector<int64_t>& starts,

View File

@@ -307,3 +307,73 @@ PartialShape op::v8::Slice::calculate_output_shape(const std::vector<int64_t>& s
}
return output_shape;
}
bool op::v8::Slice::has_evaluate() const {
NGRAPH_OP_SCOPE(v8_Slice_has_evaluate);
switch (get_input_element_type(1)) {
case ngraph::element::i8:
case ngraph::element::i16:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u8:
case ngraph::element::u16:
case ngraph::element::u32:
case ngraph::element::u64:
break;
default:
return false;
}
if (get_input_size() > 4) {
switch (get_input_element_type(4)) {
case ngraph::element::i8:
case ngraph::element::i16:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u8:
case ngraph::element::u16:
case ngraph::element::u32:
case ngraph::element::u64:
break;
default:
return false;
}
}
return true;
}
bool op::v8::Slice::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
NGRAPH_OP_SCOPE(v8_Slice_evaluate);
std::vector<int64_t> starts = host_tensor_2_vector<int64_t>(inputs[1]);
std::vector<int64_t> stops = host_tensor_2_vector<int64_t>(inputs[2]);
std::vector<int64_t> steps = host_tensor_2_vector<int64_t>(inputs[3]);
std::vector<int64_t> axes(starts.size());
if (inputs.size() < 5) {
std::iota(axes.begin(), axes.end(), 0);
} else {
axes = host_tensor_2_vector<int64_t>(inputs[4]);
}
// Static HostTensor data shape is needed to clamp and normalize `start` values
const auto& data_shape = inputs[0]->get_partial_shape();
OPENVINO_ASSERT(data_shape.is_static(), "Can't evaluate Slice elements without static HostTensor data shape.");
// We need calculate static output shape based on HostTensor inputs
PartialShape output_shape = calculate_output_shape(starts, stops, steps, axes, data_shape);
OPENVINO_ASSERT(output_shape.is_static(), "Can't calculate static output shape for Slice evaluation.");
outputs[0]->set_shape(output_shape.to_shape());
outputs[0]->set_element_type(inputs[0]->get_element_type());
ngraph::runtime::reference::slice(inputs[0]->get_data_ptr<char>(),
data_shape.to_shape(),
outputs[0]->get_data_ptr<char>(),
output_shape.to_shape(),
inputs[0]->get_element_type().size(),
starts,
steps,
axes);
return true;
}

View File

@@ -4,220 +4,69 @@
#include "op/slice.hpp"
#include <algorithm>
#include <memory>
#include <vector>
#include "default_opset.hpp"
#include "exceptions.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/validation_util.hpp"
#include "onnx_import/core/null_node.hpp"
#include "op/gather.hpp"
#include "utils/common.hpp"
#include "openvino/opsets/opset8.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace {
// Normalizes ONNX Slice `axes` values (which may be negative per the ONNX spec)
// into non-negative axis indices.
// When the data rank is static, `normalize_axes` resolves negatives and
// range-checks each axis; when the rank is dynamic, negative axes cannot be
// resolved, so they are rejected with a node validation error.
std::vector<uint64_t> get_normalized_axes_vector(const Node& onnx_node,
const Rank& data_rank,
const std::vector<int64_t> axes_attr) {
if (data_rank.is_static()) {
const auto normalized_axes_vec = normalize_axes(onnx_node.get_description(), axes_attr, data_rank);
return std::vector<uint64_t>(std::begin(normalized_axes_vec), std::end(normalized_axes_vec));
} else {
// Dynamic rank: only non-negative axes can be used as-is.
CHECK_VALID_NODE(onnx_node,
std::all_of(std::begin(axes_attr),
std::end(axes_attr),
[](int64_t axis) {
return axis >= 0;
}),
"All axes must be positive when data rank is unknown");
return std::vector<uint64_t>(std::begin(axes_attr), std::end(axes_attr));
}
}
/// \brief Transform Slice axes input to mask which is attribute of
/// StridedSlice:v1 interface.
///
/// \note Mask attributes of StridedSlice:v1 operator indicate
/// if corresponding begin/end/strides input indices should be applied (0
/// value) or ignored (1 value)
///
/// \param[in] axes Axes input of ONNX Slice operator
/// \param[in] slice_indices_length Length of Slice indices
/// (starts, ends, steps)
///
/// \return Mask attribute in format required by StridedSlice:v1
std::vector<int64_t> axes_to_mask(const std::vector<uint64_t>& axes, uint64_t slice_indices_length) {
// Start fully masked (1 = ignore), then unmask (0 = apply) each sliced axis.
std::vector<int64_t> mask(slice_indices_length, 1);
for (auto axis : axes) {
mask[axis] = 0;
}
return mask;
}
/// \brief Adjust ONNX Slice indices: starts, ends, steps to StridedSlice:v1
/// interface.
///
/// \note StridedSlice:v1 doesn't support axes parameters.
/// The axes parameter determines to which dimension of input data slice
/// operation should be applied.
/// The returned sub-graph provides proper adjustment of Slice indices if
/// it is needed.
///
/// \param[in] indices Parameters of Slice operator: starts, ends,
/// steps.
/// \param[in] axes Determines dimensions on which slice
/// operation should be applied.
/// \param[in] slice_indices_length Indices length after adjustment
/// \param[in] fill_in_value Neutral value (`0` for starts and ends,
/// `1` for steps) which is set to indices
/// in order to provide adjustment.
///
/// \return Sub-graph representing adjusted indices, or the input indices
/// unchanged if no transformation was needed.
Output<ngraph::Node> adjust_indices_if_needed(const Output<ngraph::Node>& indices,
const std::vector<uint64_t>& axes,
uint64_t slice_indices_length,
int64_t fill_in_value) {
const bool are_axes_sorted = std::is_sorted(axes.begin(), axes.end());
const auto indices_shape = indices.get_partial_shape();
// if length of slice indices vector is known
if (indices_shape.rank().is_static() && indices_shape.rank().get_length() == 1 && indices_shape[0].is_static()) {
if (static_cast<uint64_t>(indices_shape[0].get_length()) >= slice_indices_length && are_axes_sorted) {
// adjusting indices is not needed
return indices;
}
}
// Handle a case when starts/ends/steps lengths are less than provided axes
// in order to ensure compatibility with `StridedSlice:v1` interface
// Example:
// data_shape: {3, 3, 3, 3}
// starts: [1, 1] - after extending --> [0, 0, 1, 1]
// ends: [2, 2] - after extending --> [0, 0, 2, 2]
// steps : [0, 1] - after extending --> [1, 1, 0, 1] (`1` is neutral as a
// strides value)
// axes: [2, 3] - apply slice values to 2 and 3 dimension of input data
// expected_output_shape: {3, 3, 1, 1}
OutputVector adjusted_indices(slice_indices_length);
std::vector<uint64_t> target_axes(axes);
const auto gather_axis = default_opset::Constant::create(indices.get_element_type(), {}, {0});
int added_indices_number = 0;
for (uint64_t i = 0; i < slice_indices_length; ++i) {
if (std::find(std::begin(axes), std::end(axes), i) == axes.end()) {
// Axis `i` is not sliced: insert the neutral fill-in value for it and
// remember where it landed so unsorted axes can be permuted later.
adjusted_indices[i] = default_opset::Constant::create(indices.get_element_type(), {1}, {fill_in_value});
target_axes.insert(std::next(target_axes.begin(), i), i);
++added_indices_number;
} else {
// Axis `i` is sliced: gather its value from the original indices,
// offset by the number of neutral entries inserted so far.
adjusted_indices[i] = std::make_shared<default_opset::Gather>(
indices,
default_opset::Constant::create(indices.get_element_type(), {1}, {i - added_indices_number}),
gather_axis);
}
}
if (!are_axes_sorted) {
// Unsorted axes: move each gathered value into its target axis position.
OutputVector indices_tmp(adjusted_indices);
for (size_t i = 0; i < target_axes.size(); ++i) {
adjusted_indices[target_axes[i]] = indices_tmp[i];
}
}
return std::make_shared<default_opset::Concat>(adjusted_indices, 0);
}
} // namespace
namespace set_10 {
// NOTE(review): this span appears to be a diff rendering with pre- and
// post-change lines interleaved (no +/- markers survived): `data`, `starts`
// and `ends` are each declared twice, both the old StridedSlice-based
// lowering and the new ov::opset8::Slice-based one are present, and code
// follows an unconditional return. As shown it would not compile; the lines
// below are kept byte-identical for review.
OutputVector slice(const Node& node) {
using ngraph::op::is_null;
OutputVector inputs{node.get_ng_inputs()};
const auto data = inputs.at(0);
const auto data_rank = data.get_partial_shape().rank();
// NOTE(review): duplicate declarations of `data`/`starts`/`ends` begin here.
const auto& data = inputs.at(0);
const auto& starts = inputs.at(1);
const auto& ends = inputs.at(2);
auto starts = inputs.at(1);
auto ends = inputs.at(2);
// Slice is calculated over all axes as default
std::shared_ptr<default_opset::Constant> axes_const;
if (inputs.size() >= 4 && !is_null(inputs.at(3))) // axes input provided
{
axes_const = ngraph::get_constant_from_source(inputs.at(3));
CHECK_VALID_NODE(node, axes_const != nullptr, "Axes input must be constant");
} else {
CHECK_VALID_NODE(node, data_rank.is_static(), "Data rank must be static when axes input is not provided");
const size_t data_rank_value = data_rank.get_length();
axes_const = default_opset::Constant::create(element::i64,
{data_rank_value},
common::get_monotonic_range<int64_t>(data_rank_value));
}
auto raw_axes_vec = axes_const->cast_vector<int64_t>();
std::vector<uint64_t> axes_vec = get_normalized_axes_vector(node, data_rank, raw_axes_vec);
const size_t slice_indices_length = *std::max_element(std::begin(axes_vec), std::end(axes_vec)) + 1;
const auto begin_end_mask = axes_to_mask(axes_vec, slice_indices_length);
const bool axes_input_provided = inputs.size() >= 4 && !is_null(inputs.at(3));
const bool steps_input_provided = inputs.size() == 5 && !is_null(inputs.at(4));
Output<ngraph::Node> steps;
if (inputs.size() == 5 && !is_null(inputs.at(4))) // steps input provided
{
if (steps_input_provided) {
steps = inputs.at(4);
} else {
steps = default_opset::Constant::create(element::i64,
{slice_indices_length},
std::vector<int64_t>(slice_indices_length, 1));
// NOTE(review): alternative default-steps construction — broadcasts a
// scalar `1` to the shape of `starts`.
const auto& default_step = default_opset::Constant::create(starts.get_element_type(), {1}, {1});
steps =
std::make_shared<default_opset::Broadcast>(default_step,
std::make_shared<default_opset::ShapeOf>(starts, element::i64));
}
starts = adjust_indices_if_needed(starts, axes_vec, slice_indices_length, 0);
ends = adjust_indices_if_needed(ends, axes_vec, slice_indices_length, 0);
steps = adjust_indices_if_needed(steps, axes_vec, slice_indices_length, 1);
return {std::make_shared<default_opset::StridedSlice>(data, starts, ends, steps, begin_end_mask, begin_end_mask)};
// NOTE(review): unreachable as shown — this is the post-change construction path.
if (axes_input_provided) {
const auto axes = inputs.at(3);
return {std::make_shared<ov::opset8::Slice>(data, starts, ends, steps, axes)};
} else {
return {std::make_shared<ov::opset8::Slice>(data, starts, ends, steps)};
}
}
} // namespace set_10
namespace set_1 {
// NOTE(review): this span also appears to interleave pre- and post-change diff
// lines: `starts` and `ends` are each declared twice, both `axes` and
// `axes_atr` are read, and further statements follow an unconditional return.
// As shown it would not compile; the lines below are kept byte-identical.
OutputVector slice(const Node& node) {
Output<ngraph::Node> data = node.get_ng_inputs().at(0);
const auto data_rank = data.get_partial_shape().rank();
const auto starts_atr = node.get_attribute_value<std::vector<int64_t>>("starts");
const auto ends_atr = node.get_attribute_value<std::vector<int64_t>>("ends");
std::shared_ptr<ngraph::Node> starts =
std::make_shared<default_opset::Constant>(element::i64, Shape{starts_atr.size()}, starts_atr);
std::shared_ptr<ngraph::Node> ends =
std::make_shared<default_opset::Constant>(element::i64, Shape{ends_atr.size()}, ends_atr);
// NOTE(review): duplicate declarations of `starts`/`ends` follow.
const auto starts = std::make_shared<default_opset::Constant>(element::i64, Shape{starts_atr.size()}, starts_atr);
const auto ends = std::make_shared<default_opset::Constant>(element::i64, Shape{ends_atr.size()}, ends_atr);
auto axes = node.get_attribute_value<std::vector<int64_t>>("axes", std::vector<int64_t>());
auto axes_atr = node.get_attribute_value<std::vector<int64_t>>("axes", std::vector<int64_t>());
if (axes.empty()) {
CHECK_VALID_NODE(node, data_rank.is_static(), "Data rank must be static when axes input is not provided");
axes = common::get_monotonic_range<int64_t>(data_rank.get_length());
// Default steps: a vector of ones, one per `starts` entry.
const auto steps = default_opset::Constant::create(element::i64,
Shape{starts_atr.size()},
std::vector<int64_t>(starts_atr.size(), 1));
if (axes_atr.empty()) {
return {std::make_shared<ov::opset8::Slice>(data, starts, ends, steps)};
} else {
const auto& axes = std::make_shared<default_opset::Constant>(element::i64, Shape{axes_atr.size()}, axes_atr);
return {std::make_shared<ov::opset8::Slice>(data, starts, ends, steps, axes)};
}
// NOTE(review): the remainder is the pre-change StridedSlice lowering path.
std::vector<uint64_t> normalized_axes = get_normalized_axes_vector(node, data_rank, axes);
const size_t slice_indices_length = *std::max_element(std::begin(normalized_axes), std::end(normalized_axes)) + 1;
const auto begin_end_mask = axes_to_mask(normalized_axes, slice_indices_length);
std::shared_ptr<ngraph::Node> strides =
default_opset::Constant::create(element::i64,
Shape{slice_indices_length},
std::vector<int64_t>(slice_indices_length, 1));
starts = adjust_indices_if_needed(starts, normalized_axes, slice_indices_length, 0).get_node_shared_ptr();
ends = adjust_indices_if_needed(ends, normalized_axes, slice_indices_length, 0).get_node_shared_ptr();
strides = adjust_indices_if_needed(strides, normalized_axes, slice_indices_length, 1).get_node_shared_ptr();
return {std::make_shared<default_opset::StridedSlice>(data, starts, ends, strides, begin_end_mask, begin_end_mask)};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import

View File

@@ -32,7 +32,7 @@ graph {
}
initializer {
data_type: 7
dims: 2
dims: 1
int64_data: 1
name: "axes"
}

View File

@@ -987,7 +987,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_default_axes) {
test_case.add_expected_output<float>(Shape{1, 1, 1}, {9});
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_10_the_same_output_same) {
NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_10_the_same_output_same) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/slice_2d_the_same_out_shape.onnx"));

View File

@@ -20,7 +20,6 @@ from tests import (
xfail_issue_38699,
xfail_issue_38701,
xfail_issue_38706,
xfail_issue_38708,
xfail_issue_38710,
xfail_issue_38713,
xfail_issue_38724,
@@ -216,15 +215,9 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu",
),
(xfail_issue_38706, "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu"),
(
xfail_issue_38708,
"OnnxBackendNodeModelTest.test_slice_default_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_negative_axes_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_cpu",
"OnnxBackendNodeModelTest.test_slice_cpu",
"OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu",
xfail_issue_38706,
"OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu",
"OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu",
),
(

View File

@@ -19,7 +19,6 @@ from tests_compatibility import (
xfail_issue_38699,
xfail_issue_38701,
xfail_issue_38706,
xfail_issue_38708,
xfail_issue_38710,
xfail_issue_38713,
xfail_issue_38724,
@@ -201,15 +200,9 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu",
),
(xfail_issue_38706, "OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu"),
(
xfail_issue_38708,
"OnnxBackendNodeModelTest.test_slice_default_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_negative_axes_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_cpu",
"OnnxBackendNodeModelTest.test_slice_cpu",
"OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu",
xfail_issue_38706,
"OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu",
"OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu",
),
(