Unique-10 reference implementation (#13945)

* Basic Unique impl

* Shape inference tests

* More type prop tests

* Dynamic input tests

* Visitor tests

* Code formatting

* Minor corrections

* Modern c++ trick to make MSVC happy

* Refactor

* Code review

* More tests with dynamic shapes

* Op check test for Unique

* Well, a whitespace

* Different way of out shape calculation

* Spec

* Axis values constraint in the docs

* Zero-dim handling

* opset10_dump test adjustment

* 0D implementation and some test infra adjustments

* More coverage for scalar inputs

* type dependent test case generators

* A working version for 1D with all outputs

* Tensor shape calculation helper

* 1D handling with sorted mode included

* Handling of no-axis cases

* Some tests for no-axis case

* Unique's evaluate method

* A working fallback for the CPU plugin

* evaluate() removal

* Corrected shape inference

* More type prop fixes

* Unified axis extraction helpers

* Cleanup and preparation for slices processing

* Slice descriptor struct

* Static shapes generation with axis

* N-D solution with axis handling

* Slices comparison impl

* Fix Windows compilation errors

* Apply defaulting principles

Co-authored-by: Tomasz Jankowski <tomasz1.jankowski@intel.com>
This commit is contained in:
Tomasz Dołbniak
2022-11-18 10:12:09 +01:00
committed by GitHub
parent bc90ed740f
commit 5ff0f5d84c
9 changed files with 893 additions and 51 deletions

View File

@@ -0,0 +1,323 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "gather.hpp"
#include "ngraph/coordinate_index.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/shape.hpp"
namespace ngraph {
namespace runtime {
namespace reference {
// Tells whether a TensorSlice refers to a single scalar element of the input tensor
// or to a whole (rank-1)-dimensional slice of it (the latter is used when the optional
// 'axis' input of Unique is provided).
enum class DescriptorType { SINGLE_VALUE, SLICE };

// A lightweight descriptor of one element (or one slice) of the input tensor.
// The "unique" algorithm works on vectors of these descriptors instead of moving
// the actual tensor data around.
template <typename Index_t, typename Count_t>
struct TensorSlice {
    // Creates a descriptor pointing at element/slice number `idx_` of the input tensor.
    TensorSlice(const Index_t idx_, const DescriptorType descriptor_type_)
        : idx{idx_},
          descriptor_type{descriptor_type_} {}
    // Creates a fully specified descriptor (index, reverse index and occurrence count);
    // used for the trivial scalar/single-element case. Note: descriptor_type keeps its
    // default SINGLE_VALUE here.
    TensorSlice(const Index_t idx_, const Index_t rev_idx_, const Count_t count_)
        : idx{idx_},
          rev_idx{rev_idx_},
          count{count_} {}
    /// The index of the current element in the original input tensor. It never changes even if the elements get
    /// sorted. This value is used as a mapping between a unique element in the first output tensor and the position
    /// of this element in the original input tensor.
    Index_t idx = 0;
    /// The rev_idx is a mapping between every element in the original input and the location of a unique element
    /// in the first output tensor. More than one Element can have the same rev_idx.
    Index_t rev_idx = -1;
    /// The number of occurrences of a given element in the input tensor. This value is different than one only for
    /// duplicates found in the input tensor.
    Count_t count = 1;
    /// Indicates if this object points to a single value in the input tensor (rather than a slice of the tensor)
    DescriptorType descriptor_type = DescriptorType::SINGLE_VALUE;
};
// The result of find_unique_elements(): descriptors of all input elements plus the
// subset of descriptors representing the unique values, ready to be turned into the
// four output tensors of the Unique operator.
template <typename Index_t, typename Count_t>
struct UniqueElements {
    /// Contains descriptors of all elements in the input tensor. Possibly sorted by value.
    std::vector<TensorSlice<Index_t, Count_t>> all_tensor_elements;
    /// Subset of all tensor elements. First occurrences of the unique values.
    std::vector<TensorSlice<Index_t, Count_t>> unique_tensor_elements;
    /// Axis (optional). Used to gather unique elements over a given dimension.
    /// Only meaningful when the descriptors are of type SLICE; defaults to 0 otherwise.
    int64_t axis = 0;
};
namespace {
// Generates descriptors of slices or individual elems of the input tensor. This function returns a vector of
// helper objects representing elements that the "unique" algorithm is supposed to process later.
//
// `count` is the number of elements (or slices) to describe; `type` is stamped on every descriptor.
template <typename Index_t, typename Count_t>
std::vector<TensorSlice<Index_t, Count_t>> generate_descriptors(const size_t count, const DescriptorType type) {
    std::vector<TensorSlice<Index_t, Count_t>> descriptors;
    descriptors.reserve(count);
    // Cast once so the loop compares like-signed values (avoids a signed/unsigned
    // comparison between Index_t and size_t on every iteration).
    const auto n = static_cast<Index_t>(count);
    for (Index_t i = 0; i < n; ++i) {
        descriptors.emplace_back(i, type);
    }
    return descriptors;
}
// Returns indices of the first element of each tensor slice. The index is equal to a coordinate index.
template <typename Index_t, typename Count_t>
inline std::pair<size_t, size_t> first_elems_of_both_slices(const TensorSlice<Index_t, Count_t>& lhs,
                                                            const TensorSlice<Index_t, Count_t>& rhs,
                                                            const std::vector<size_t>& data_shape_strides,
                                                            const int64_t axis) {
    // Both slices start at (stride along the axis) * (slice number).
    const auto axis_stride = data_shape_strides[axis];
    return {axis_stride * lhs.idx, axis_stride * rhs.idx};
}
// Returns the (always non-negative) distance between the first elements of two slices.
template <typename Index_t, typename Count_t>
inline size_t calc_slices_offset(const TensorSlice<Index_t, Count_t>& lhs,
                                 const TensorSlice<Index_t, Count_t>& rhs,
                                 const std::vector<size_t>& data_shape_strides,
                                 const int64_t axis) {
    const auto starts = first_elems_of_both_slices(lhs, rhs, data_shape_strides, axis);
    // size_t subtraction must not underflow, hence the absolute difference.
    return starts.first > starts.second ? starts.first - starts.second : starts.second - starts.first;
}
// Removes the dimension at `axis` from the data shape; the remaining dimensions
// describe the coordinates to visit inside a single slice.
inline Shape slice_shape_to_iterate(Shape data_shape, const int64_t axis) {
    data_shape.erase(data_shape.begin() + axis);
    return data_shape;
}
bool scalar_or_single_element(const Shape& s) {
return std::all_of(std::begin(s), std::end(s), [](Shape::value_type d) {
return d == 1;
});
}
} // namespace
// Scans the input tensor and produces descriptors of its unique elements (or unique
// slices along `axis`, when the axis pointer is non-null).
//
// data        - pointer to the input tensor's elements
// data_shape  - static shape of the input tensor
// axis        - optional axis (null => the tensor is treated as flattened)
// sorted      - when true, the unique elements are reported in ascending order
//
// The returned UniqueElements can be fed to make_tensor_shapes() and unique().
template <typename Data_t, typename Index_t, typename Count_t = int64_t>
UniqueElements<Index_t, Count_t> find_unique_elements(const Data_t* data,
                                                      const Shape& data_shape,
                                                      std::unique_ptr<int64_t> axis,
                                                      const bool sorted) {
    using std::begin;
    using std::end;
    const auto data_shape_strides = ngraph::row_major_strides(data_shape);
    // Orders two single-value descriptors by the data values they point at.
    const auto ascending_order = [&data](const TensorSlice<Index_t, Count_t>& lhs,
                                         const TensorSlice<Index_t, Count_t>& rhs) {
        return *(data + lhs.idx) < *(data + rhs.idx);
    };
    // Orders two slice descriptors by comparing the slices elementwise.
    // NOTE(review): this comparator returns true when the slices are equal and uses
    // lhs as the base for the offset regardless of which slice comes first in memory;
    // verify it satisfies the strict-weak-ordering requirement of std::sort/lower_bound.
    const auto slices_ascending_order = [&](const TensorSlice<Index_t, Count_t>& lhs,
                                            const TensorSlice<Index_t, Count_t>& rhs) {
        const auto slices_offset = calc_slices_offset(lhs, rhs, data_shape_strides, *axis);
        const auto shape_to_iterate = slice_shape_to_iterate(data_shape, *axis);
        for (auto it = CoordinateIterator(shape_to_iterate); it != CoordinateIterator::end(); ++it) {
            auto elem_coord = *it;
            // Inject the slice number at the axis position to get a full input coordinate.
            elem_coord.insert(elem_coord.cbegin() + *axis, lhs.idx);
            const auto lhs_elem_idx = ngraph::coordinate_index(elem_coord, data_shape);
            const auto rhs_elem_idx = lhs_elem_idx + slices_offset;
            if (*(data + rhs_elem_idx) > *(data + lhs_elem_idx)) {
                return false;
            }
        }
        return true;
    };
    // Equality of two single-value descriptors (compares the pointed-at data values).
    const auto elements_are_equal = [&data](const TensorSlice<Index_t, Count_t>& lhs,
                                            const TensorSlice<Index_t, Count_t>& rhs) {
        return *(data + lhs.idx) == *(data + rhs.idx);
    };
    // Elementwise equality of two slices taken along the axis.
    const auto slices_are_equal = [&](const TensorSlice<Index_t, Count_t>& lhs,
                                      const TensorSlice<Index_t, Count_t>& rhs) {
        // Base the coordinates on the slice located earlier in memory so that adding
        // the offset always lands inside the tensor.
        const auto& slice_with_lower_idx =
            std::min(lhs, rhs, [](const TensorSlice<Index_t, Count_t>& a, const TensorSlice<Index_t, Count_t>& b) {
                return a.idx < b.idx;
            });
        // the individual elements in the two compared slices are always separated by the same offset
        // and this can be used to compare them elementwise
        const auto slices_offset = calc_slices_offset(lhs, rhs, data_shape_strides, *axis);
        const auto shape_to_iterate = slice_shape_to_iterate(data_shape, *axis);
        for (auto it = CoordinateIterator(shape_to_iterate); it != CoordinateIterator::end(); ++it) {
            // All slice elements have a "slice index" constant value at the axis position, only the other dimensions
            // vary for each slice element. Those dimensions are provided by CoordinateIterator, the value at axis
            // needs to be injected manually.
            auto elem_coord = *it;
            elem_coord.insert(elem_coord.cbegin() + *axis, slice_with_lower_idx.idx);
            const auto lhs_elem_idx = ngraph::coordinate_index(elem_coord, data_shape);
            const auto rhs_elem_idx = lhs_elem_idx + slices_offset;
            if (*(data + lhs_elem_idx) != *(data + rhs_elem_idx)) {
                return false;
            }
        }
        return true;
    };
    // Factory for a unary predicate used with std::find_if: "is x equal to this
    // already-registered unique element?"
    const auto already_unique = [&elements_are_equal](const TensorSlice<Index_t, Count_t>& existing_unique_elem) {
        return [&elements_are_equal, &existing_unique_elem](const TensorSlice<Index_t, Count_t>& x) {
            return elements_are_equal(existing_unique_elem, x);
        };
    };
    // Same as above but for slice descriptors.
    const auto already_unique_slice = [&slices_are_equal](const TensorSlice<Index_t, Count_t>& existing_unique_elem) {
        return [&slices_are_equal, &existing_unique_elem](const TensorSlice<Index_t, Count_t>& x) {
            return slices_are_equal(existing_unique_elem, x);
        };
    };
    UniqueElements<Index_t, Count_t> ret;
    if (scalar_or_single_element(data_shape)) {
        // Trivial case: a scalar (or a single-element tensor) is its own unique value.
        ret.all_tensor_elements.emplace_back(0, 0, 1);
        ret.unique_tensor_elements.emplace_back(0, 0, 1);
        return ret;
    } else if (!axis || (is_vector(data_shape) && data_shape[0] > 1)) {  // 1D or N-D without any axis
        const auto data_elems_count = shape_size(data_shape);
        ret.all_tensor_elements =
            generate_descriptors<Index_t, Count_t>(data_elems_count, DescriptorType::SINGLE_VALUE);
        if (sorted) {
            std::sort(begin(ret.all_tensor_elements), end(ret.all_tensor_elements), ascending_order);
        }
        // The first element is unique by definition; every later element is either
        // matched against the registered uniques or appended as a new unique value.
        ret.all_tensor_elements[0].rev_idx = 0;
        ret.unique_tensor_elements.push_back(ret.all_tensor_elements[0]);
        for (size_t i = 1; i < data_elems_count; ++i) {
            auto& tensor_element = ret.all_tensor_elements[i];
            auto existing_unique = end(ret.unique_tensor_elements);
            if (sorted) {
                // Sorted input => the uniques are sorted too, so a binary search suffices.
                existing_unique = std::lower_bound(begin(ret.unique_tensor_elements),
                                                   end(ret.unique_tensor_elements),
                                                   tensor_element,
                                                   ascending_order);
            } else {
                existing_unique = std::find_if(begin(ret.unique_tensor_elements),
                                               end(ret.unique_tensor_elements),
                                               already_unique(tensor_element));
            }
            if (existing_unique != end(ret.unique_tensor_elements)) {
                // Duplicate: record the mapping and bump the occurrence counter.
                tensor_element.rev_idx = existing_unique->rev_idx;
                existing_unique->count++;
            } else {
                // Brand new value: it maps to itself at the end of the uniques vector.
                tensor_element.rev_idx = ret.unique_tensor_elements.size();
                ret.unique_tensor_elements.push_back(tensor_element);
            }
        }
    } else {
        // N-D case with an axis: unique is computed over whole slices along that axis.
        ret.axis = *axis;
        ret.all_tensor_elements = generate_descriptors<Index_t, Count_t>(data_shape[*axis], DescriptorType::SLICE);
        if (sorted) {
            std::sort(begin(ret.all_tensor_elements), end(ret.all_tensor_elements), slices_ascending_order);
        }
        ret.all_tensor_elements[0].rev_idx = 0;
        ret.unique_tensor_elements.push_back(ret.all_tensor_elements[0]);
        for (size_t i = 1; i < data_shape[*axis]; ++i) {
            auto& tensor_element = ret.all_tensor_elements[i];
            auto existing_unique = end(ret.unique_tensor_elements);
            if (sorted) {
                existing_unique = std::lower_bound(begin(ret.unique_tensor_elements),
                                                   end(ret.unique_tensor_elements),
                                                   tensor_element,
                                                   slices_ascending_order);
            } else {
                existing_unique = std::find_if(begin(ret.unique_tensor_elements),
                                               end(ret.unique_tensor_elements),
                                               already_unique_slice(tensor_element));
            }
            if (existing_unique != end(ret.unique_tensor_elements)) {
                tensor_element.rev_idx = existing_unique->rev_idx;
                existing_unique->count++;
            } else {
                tensor_element.rev_idx = ret.unique_tensor_elements.size();
                ret.unique_tensor_elements.push_back(tensor_element);
            }
        }
    }
    return ret;
}
template <typename Index_t, typename Count_t = int64_t>
std::tuple<Shape, Shape, Shape> make_tensor_shapes(const UniqueElements<Index_t, Count_t>& unique_elements,
const Shape& data_shape,
std::unique_ptr<int64_t> axis) {
if (axis) {
// if the axis was specified we need to return a data shape with a modified dimension-at-axis
// this is where we need to insert the number of detected unique elements
// all other dimensions stay the same as in the original data_shape
auto output0 = data_shape;
output0[*axis] = unique_elements.unique_tensor_elements.size();
const auto output1_3 = Shape{unique_elements.unique_tensor_elements.size()};
const auto output2 = Shape{data_shape[*axis]};
return std::make_tuple(output0, output1_3, output2);
} else {
const auto output0 = Shape{unique_elements.unique_tensor_elements.size()};
const auto output1_3 = output0;
const auto output2 = Shape{unique_elements.all_tensor_elements.size()};
return std::make_tuple(output0, output1_3, output2);
}
}
// Fills the four output buffers of the Unique operator using the descriptors produced
// by find_unique_elements(). The buffers must already have the sizes returned by
// make_tensor_shapes().
//
// out_unique_elements - unique values (or unique slices gathered along the axis)
// out_indices         - index in the input of the first occurrence of each unique value
// out_rev_indices     - for every input element, the index of its value in output 0
// out_counts          - number of occurrences of each unique value
template <typename Data_t, typename Index_t, typename Count_t = int64_t>
void unique(Data_t* out_unique_elements,
            Index_t* out_indices,
            Index_t* out_rev_indices,
            Count_t* out_counts,
            const Data_t* data,
            const Shape& data_shape,
            const Shape& out_shape,
            const UniqueElements<Index_t, Count_t>& descriptors) {
    if (descriptors.unique_tensor_elements[0].descriptor_type == DescriptorType::SINGLE_VALUE) {
        // Flattened mode: each unique descriptor maps directly to one output element.
        for (size_t i = 0; i < descriptors.unique_tensor_elements.size(); ++i) {
            const auto& descriptor = descriptors.unique_tensor_elements[i];
            out_unique_elements[i] = *(data + descriptor.idx);
            out_indices[i] = descriptor.idx;
            out_counts[i] = descriptor.count;
        }
    } else {
        // Slice mode: collect the slice numbers of the unique slices and let the
        // gather reference copy whole slices into the first output.
        std::vector<Index_t> indices;
        indices.reserve(descriptors.unique_tensor_elements.size());
        for (size_t i = 0; i < descriptors.unique_tensor_elements.size(); ++i) {
            const auto& descriptor = descriptors.unique_tensor_elements[i];
            out_indices[i] = descriptor.idx;
            out_counts[i] = descriptor.count;
            indices.push_back(descriptor.idx);
        }
        ngraph::runtime::reference::gather(data,
                                           indices.data(),
                                           out_unique_elements,
                                           data_shape,
                                           Shape{descriptors.unique_tensor_elements.size()},
                                           out_shape,
                                           descriptors.axis);
    }
    // filling out this output tensor requires a separate pass over all elements of the input tensor
    // for each input element we need to output an index of that element in the first output tensor
    // additionally if sorting was involved the "all_tensor_elements" might be ordered differently than the elements
    // in the original input tensor - this is why descriptor.idx is used for indexing the output tensor below
    for (const auto& descriptor : descriptors.all_tensor_elements) {
        out_rev_indices[descriptor.idx] = descriptor.rev_idx;
    }
}
} // namespace reference
} // namespace runtime
} // namespace ngraph

View File

@@ -5,6 +5,7 @@
#include "openvino/op/unique.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/unique.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/op/util/op_types.hpp"
@@ -14,6 +15,80 @@ int64_t extract_axis(const std::shared_ptr<op::v0::Constant>& axis_constant) {
const auto axis_vec = axis_constant->cast_vector<int64_t>();
return axis_vec.at(0);
}
// Thin typed wrapper over the reference find_unique_elements(): reads the input
// tensor's buffer as T and forwards the (optional) axis and the sorted flag.
template <typename T, typename Index_t = int32_t, typename Counts_t = int32_t>
ngraph::runtime::reference::UniqueElements<Index_t, Counts_t> call_unique(const Tensor& input,
                                                                          std::unique_ptr<int64_t> axis,
                                                                          const bool sorted) {
    return ngraph::runtime::reference::find_unique_elements<T, Index_t, Counts_t>(input.data<T>(),
                                                                                  input.get_shape(),
                                                                                  std::move(axis),
                                                                                  sorted);
}
// Runs the Unique reference algorithm on a constant input tensor in order to determine
// the exact static shapes of the operator's outputs (used by shape inference when the
// data input is a Constant).
std::tuple<Shape, Shape, Shape> calculate_static_output_shapes(const Tensor& input_data, const op::v10::Unique& op) {
    using Index_t = int32_t;
    using Counts_t = int32_t;
    // Returns the axis wrapped in a unique_ptr when the optional second input exists
    // and is a Constant; returns null otherwise (flattened mode).
    const auto maybe_extract_axis = [&op]() {
        std::unique_ptr<int64_t> axis;
        if (op.get_input_size() == 2 && ov::op::util::is_constant(op.input_value(1).get_node())) {
            const auto axis_constant =
                std::dynamic_pointer_cast<op::v0::Constant>(op.input_value(1).get_node_shared_ptr());
            axis = std::unique_ptr<int64_t>(new int64_t{extract_axis(axis_constant)});
        }
        return axis;
    };
    ngraph::runtime::reference::UniqueElements<Index_t, Counts_t> unique_elements;
    std::unique_ptr<int64_t> axis = maybe_extract_axis();
    // Dispatch on the data element type.
    // NOTE(review): there is no default case - for element types not listed below
    // `unique_elements` stays empty; confirm those types are rejected earlier.
    switch (op.get_input_element_type(0)) {
    case element::boolean:
        unique_elements = call_unique<bool>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::i8:
        unique_elements = call_unique<int8_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::i16:
        unique_elements = call_unique<int16_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::i32:
        unique_elements = call_unique<int32_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::i64:
        unique_elements = call_unique<int64_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::u8:
        unique_elements = call_unique<uint8_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::u16:
        unique_elements = call_unique<uint16_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::u32:
        unique_elements = call_unique<uint32_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::u64:
        unique_elements = call_unique<uint64_t>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::bf16:
        unique_elements = call_unique<bfloat16>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::f16:
        unique_elements = call_unique<float16>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::f32:
        unique_elements = call_unique<float>(input_data, std::move(axis), op.get_sorted());
        break;
    case element::f64:
        unique_elements = call_unique<double>(input_data, std::move(axis), op.get_sorted());
        break;
    }
    // The first axis pointer was moved into call_unique above, so the axis has to be
    // extracted a second time for the shape calculation.
    return ngraph::runtime::reference::make_tensor_shapes(unique_elements,
                                                          input_data.get_shape(),
                                                          maybe_extract_axis());
}
} // namespace
op::v10::Unique::Unique(const Output<Node>& data, const bool sorted, const element::Type& index_element_type)
@@ -57,57 +132,75 @@ void op::v10::Unique::validate_and_infer_types() {
output_shapes[0] = PartialShape::dynamic();
output_shapes[1] =
input_tensor_capacity > 0 ? PartialShape{{1, input_tensor_capacity}} : PartialShape{{Dimension::dynamic()}};
output_shapes[2] = output_shapes[1];
output_shapes[2] =
input_tensor_capacity > 0 ? PartialShape{{input_tensor_capacity}} : PartialShape{{Dimension::dynamic()}};
output_shapes[3] = output_shapes[1];
if (get_input_size() == 2) {
NODE_VALIDATION_CHECK(
this,
get_input_element_type(1) == element::i32 || get_input_element_type(1) == element::i64,
"The allowed element types of the 'axis' input tensor of the Unique operator are i32 and i64.");
if (ov::op::util::is_constant(input_value(0).get_node())) {
const auto input_const = std::dynamic_pointer_cast<op::v0::Constant>(input_value(0).get_node_shared_ptr());
ov::Tensor input_data = ov::Tensor(input_const->get_element_type(), input_const->get_shape());
memcpy(input_data.data(), input_const->get_data_ptr(), input_data.get_byte_size());
const auto tensor_shapes = calculate_static_output_shapes(input_data, *this);
NODE_VALIDATION_CHECK(
this,
get_input_partial_shape(1) == Shape{} || get_input_partial_shape(1) == Shape{1},
"The 'axis' input tensor of the Unique operator must be a scalar or 1D tensor with 1 element.");
output_shapes[0] = std::get<0>(tensor_shapes);
output_shapes[1] = std::get<1>(tensor_shapes);
output_shapes[2] = std::get<2>(tensor_shapes);
output_shapes[3] = std::get<1>(tensor_shapes);
} else {
if (get_input_size() == 2) {
NODE_VALIDATION_CHECK(
this,
get_input_element_type(1) == element::i32 || get_input_element_type(1) == element::i64,
"The allowed element types of the 'axis' input tensor of the Unique operator are i32 and i64.");
NODE_VALIDATION_CHECK(this,
ov::op::util::is_constant(input_value(1).get_node()),
"The 'axis' input of the Unique operator must be connected to a Constant.");
const int64_t axis =
extract_axis(std::dynamic_pointer_cast<op::v0::Constant>(input_value(1).get_node_shared_ptr()));
NODE_VALIDATION_CHECK(
this,
get_input_partial_shape(1) == Shape{} || get_input_partial_shape(1) == Shape{1},
"The 'axis' input tensor of the Unique operator must be a scalar or 1D tensor with 1 element.");
if (input_shape.rank().is_static()) {
const auto normalized_axis = ngraph::normalize_axis(this, axis, input_shape.rank());
const auto dim_at_axis = input_shape[normalized_axis];
NODE_VALIDATION_CHECK(this,
ov::op::util::is_constant(input_value(1).get_node()),
"The 'axis' input of the Unique operator must be connected to a Constant.");
const int64_t axis =
extract_axis(std::dynamic_pointer_cast<op::v0::Constant>(input_value(1).get_node_shared_ptr()));
Dimension output_dim_at_axis;
if (dim_at_axis.is_dynamic()) {
if (dim_at_axis == Dimension::dynamic()) {
output_dim_at_axis = dim_at_axis;
if (input_shape.rank().is_static()) {
const auto normalized_axis = ngraph::normalize_axis(this, axis, input_shape.rank());
const auto dim_at_axis = input_shape[normalized_axis];
Dimension output_dim_at_axis;
Dimension rev_idx_size;
if (dim_at_axis.is_dynamic()) {
if (dim_at_axis == Dimension::dynamic()) {
output_dim_at_axis = dim_at_axis;
} else {
output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()};
}
rev_idx_size = dim_at_axis;
} else if (dim_at_axis.get_length() == 0) {
output_dim_at_axis = Dimension{0};
output_shapes[1] = PartialShape{{0}};
rev_idx_size = output_dim_at_axis;
output_shapes[3] = PartialShape{{0}};
} else {
output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()};
rev_idx_size = Dimension{dim_at_axis.get_max_length()};
}
} else if (dim_at_axis.get_length() == 0) {
output_dim_at_axis = Dimension{0};
output_shapes[1] = PartialShape{{0}};
output_shapes[2] = PartialShape{{0}};
output_shapes[3] = PartialShape{{0}};
} else {
output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()};
}
auto output_shape = input_shape;
output_shape[normalized_axis] = output_dim_at_axis;
output_shapes[0] = output_shape;
}
} else {
// no axis => flattened input tensor
if (input_shape.is_static()) {
// between 1 and the total number of input tensor's unique elements
output_shapes[0] = PartialShape{{Dimension{1, input_tensor_capacity}}};
auto output_shape = input_shape;
output_shape[normalized_axis] = output_dim_at_axis;
output_shapes[0] = output_shape;
output_shapes[2] = PartialShape{rev_idx_size};
}
} else {
output_shapes[0] = PartialShape{{Dimension::dynamic()}};
// no axis => flattened input tensor
if (input_shape.is_static()) {
// between 1 and the total number of input tensor's elements
output_shapes[0] = PartialShape{{Dimension{1, input_tensor_capacity}}};
} else {
output_shapes[0] = PartialShape{{Dimension::dynamic()}};
}
}
}

View File

@@ -36,7 +36,7 @@ TEST(type_prop, unique_no_axis_3d) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}});
CHECK_OUTPUT_SHAPES(unique,
{{PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}});
{{PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{16}}, PartialShape{{1, 16}}}});
}
TEST(type_prop, unique_no_axis_3d_index_type_i32) {
@@ -45,7 +45,7 @@ TEST(type_prop, unique_no_axis_3d_index_type_i32) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i32, element::i32, element::i64}});
CHECK_OUTPUT_SHAPES(unique,
{{PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{1, 9}}}});
{{PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{9}}, PartialShape{{1, 9}}}});
}
TEST(type_prop, unique_no_axis_scalar) {
@@ -72,7 +72,7 @@ TEST(type_prop, unique_3d_scalar_axis) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}});
CHECK_OUTPUT_SHAPES(
unique,
{{PartialShape{{2}, {1, 4}, {2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}});
{{PartialShape{{2}, {1, 4}, {2}}, PartialShape{{1, 16}}, PartialShape{{4}}, PartialShape{{1, 16}}}});
}
TEST(type_prop, unique_3d_axis_1d) {
@@ -83,7 +83,7 @@ TEST(type_prop, unique_3d_axis_1d) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}});
CHECK_OUTPUT_SHAPES(
unique,
{{PartialShape{{2}, {4}, {1, 2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}});
{{PartialShape{{2}, {4}, {1, 2}}, PartialShape{{1, 16}}, PartialShape{{2}}, PartialShape{{1, 16}}}});
}
TEST(type_prop, unique_3d_negative_axis) {
@@ -94,7 +94,7 @@ TEST(type_prop, unique_3d_negative_axis) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}});
CHECK_OUTPUT_SHAPES(
unique,
{{PartialShape{{1, 2}, {4}, {2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}});
{{PartialShape{{1, 2}, {4}, {2}}, PartialShape{{1, 16}}, PartialShape{{2}}, PartialShape{{1, 16}}}});
}
TEST(type_prop, unique_dynamic_dim_at_axis) {
@@ -115,7 +115,7 @@ TEST(type_prop, unique_dim_with_intervals_at_axis) {
CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}});
CHECK_OUTPUT_SHAPES(
unique,
{{PartialShape{{2}, {1, 10}, {2}}, PartialShape{{-1}}, PartialShape{{-1}}, PartialShape{{-1}}}});
{{PartialShape{{2}, {1, 10}, {2}}, PartialShape{{-1}}, PartialShape{{2, 10}}, PartialShape{{-1}}}});
}
TEST(type_prop, unique_dynamic_rank) {
@@ -197,3 +197,10 @@ TEST(type_prop, unique_with_zero_dimension) {
CHECK_OUTPUT_SHAPES(unique, {{PartialShape{{1, 0, 2}}, PartialShape{{0}}, PartialShape{{0}}, PartialShape{{0}}}});
}
TEST(type_prop, unique_with_constant_input_no_axis) {
const auto data = opset10::Constant::create(element::i32, Shape{5}, {5, 1, 4, 2, 5});
const auto unique = make_shared<opset10::Unique>(data);
CHECK_OUTPUT_SHAPES(unique, {{Shape{{4}}, Shape{{4}}, Shape{{5}}, Shape{{4}}}});
}

View File

@@ -87,6 +87,7 @@
#include <ngraph/runtime/reference/squared_difference.hpp>
#include <ngraph/runtime/reference/tanh.hpp>
#include <ngraph/runtime/reference/tensor_iterator.hpp>
#include <ngraph/runtime/reference/unique.hpp>
#include <ngraph/runtime/reference/utils/nms_common.hpp>
#include "backend.hpp"
@@ -4200,6 +4201,63 @@ bool evaluate(const shared_ptr<op::v9::SoftSign>& op, const HostTensorVector& ou
return true;
}
// Interpreter-backend evaluator for opset10 Unique: runs the reference algorithm,
// resizes the four output host tensors to the computed static shapes and fills them.
// Data_t is the data element type; Index_t is the requested index element type.
template <typename Data_t, typename Index_t>
void execute_unique(const HostTensorVector& outputs,
                    const HostTensorVector& inputs,
                    const shared_ptr<op::v10::Unique>& op) {
    // Returns the axis wrapped in a unique_ptr when the optional second input exists
    // and is a Constant; returns null otherwise (flattened mode).
    const auto maybe_extract_axis = [&op]() {
        std::unique_ptr<int64_t> axis;
        if (op->get_input_size() == 2 && ov::op::util::is_constant(op->input_value(1).get_node())) {
            const auto axis_constant =
                std::dynamic_pointer_cast<op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
            const auto axis_vec = axis_constant->cast_vector<int64_t>();
            axis = std::unique_ptr<int64_t>(new int64_t{axis_vec.at(0)});
        }
        return axis;
    };
    const auto unique_elements =
        runtime::reference::find_unique_elements<Data_t, Index_t>(inputs[0]->get_data_ptr<Data_t>(),
                                                                  inputs[0]->get_shape(),
                                                                  maybe_extract_axis(),
                                                                  op->get_sorted());
    const auto tensor_shapes =
        runtime::reference::make_tensor_shapes(unique_elements, inputs[0]->get_shape(), maybe_extract_axis());
    auto& out_unique_elements = outputs[0];
    auto& out_indices = outputs[1];
    auto& out_rev_indices = outputs[2];
    auto& out_counts = outputs[3];
    // Outputs 1 and 3 share a shape; output 2 has one entry per input element/slice.
    out_unique_elements->set_shape(std::get<0>(tensor_shapes));
    out_indices->set_shape(std::get<1>(tensor_shapes));
    out_rev_indices->set_shape(std::get<2>(tensor_shapes));
    out_counts->set_shape(std::get<1>(tensor_shapes));
    // Counts are always written as int64_t regardless of the index element type.
    runtime::reference::unique(out_unique_elements->get_data_ptr<Data_t>(),
                               out_indices->get_data_ptr<Index_t>(),
                               out_rev_indices->get_data_ptr<Index_t>(),
                               out_counts->get_data_ptr<int64_t>(),
                               inputs[0]->get_data_ptr<Data_t>(),
                               inputs[0]->get_shape(),
                               std::get<0>(tensor_shapes),
                               unique_elements);
}
// Dispatches Unique evaluation on the requested index element type (i32 or i64).
// Returns false for any other index type, signalling an unsupported configuration.
template <element::Type_t Data_ET>
bool evaluate(const shared_ptr<op::v10::Unique>& op, const HostTensorVector& outputs, const HostTensorVector& inputs) {
    using Data_t = typename element_type_traits<Data_ET>::value_type;
    const auto index_et = op->get_index_element_type();
    if (index_et == element::i32) {
        execute_unique<Data_t, int32_t>(outputs, inputs, op);
        return true;
    }
    if (index_et == element::i64) {
        execute_unique<Data_t, int64_t>(outputs, inputs, op);
        return true;
    }
    return false;
}
template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v10::IsFinite>& op,
const HostTensorVector& outputs,

View File

@@ -144,6 +144,7 @@ _OPENVINO_OP_REG(MulticlassNms, op::v9)
_OPENVINO_OP_REG(IsFinite, op::v10)
_OPENVINO_OP_REG(IsInf, op::v10)
_OPENVINO_OP_REG(IsNaN, op::v10)
_OPENVINO_OP_REG(Unique, op::v10)
_OPENVINO_OP_REG(AUGRUCell, ov::op::internal)
_OPENVINO_OP_REG(AUGRUSequence, ov::op::internal)

View File

@@ -71,14 +71,16 @@ void CommonReferenceTest::Validate() {
ASSERT_EQ(refOutData.size(), actualOutData.size());
for (size_t i = 0; i < refOutData.size(); i++) {
ValidateBlobs(refOutData[i], actualOutData[i], threshold, abs_threshold, actual_comparision_size);
ValidateBlobs(refOutData[i], actualOutData[i], i, threshold, abs_threshold, actual_comparision_size);
}
}
void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, const ov::Tensor& outBlob,
void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, const ov::Tensor& outBlob, const size_t blob_idx,
float threshold, float abs_threshold, size_t actual_comparision_size) {
ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type());
ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size());
ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type())
<< "Incompatible element type for blob with index " << blob_idx;
ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size())
<< "Incorrect byte size for blob with index " << blob_idx;
if (actual_comparision_size == 0)
actual_comparision_size = refBlob.get_size();

View File

@@ -24,6 +24,7 @@ public:
static void ValidateBlobs(const ov::Tensor& refBlob,
const ov::Tensor& outBlob,
const size_t blob_idx,
float threshold,
float abs_threshold,
size_t actual_comparision_size = 0);

View File

@@ -0,0 +1,357 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/op/unique.hpp"
#include <gtest/gtest.h>
#include "base_reference_test.hpp"
#include "openvino/op/constant.hpp"
using namespace reference_tests;
using namespace ov;
namespace {
// Creates a scalar Constant holding the axis value for the Unique op's second input.
std::shared_ptr<op::v0::Constant> make_axis(const int64_t axis, const element::Type& et = element::i32) {
    return op::v0::Constant::create(et, Shape{}, {axis});
}
// Bundles one reference-test case for the Unique operator: the input tensor, the four
// expected outputs, the optional axis constant, the sorted flag and a label.
struct UniqueParams {
    template <typename Data_t, typename Index_t>
    UniqueParams(const Shape& data_shape,
                 const std::vector<Data_t>& input_data,
                 const std::vector<Data_t>& expected_unique_values,
                 const std::vector<Index_t>& expected_indices,
                 const std::vector<Index_t>& expected_rev_indices,
                 const std::vector<int64_t>& expected_counts,
                 std::shared_ptr<op::v0::Constant> axis_descritptor = nullptr,
                 const bool sorted = true,
                 const std::string& tested_case = "")
        : m_data_shape{data_shape},
          m_data_type{element::from<Data_t>()},
          m_index_type{element::from<Index_t>()},
          m_input_data{CreateTensor(m_data_type, input_data)},
          m_axis{axis_descritptor},
          m_sorted{sorted},
          m_tested_case{tested_case} {
        // Outputs 0-2 follow the data/index types; counts (output 3) are always i64.
        m_expected_outputs[0] = CreateTensor(m_data_type, expected_unique_values);
        m_expected_outputs[1] = CreateTensor(m_index_type, expected_indices);
        m_expected_outputs[2] = CreateTensor(m_index_type, expected_rev_indices);
        m_expected_outputs[3] = CreateTensor(element::i64, expected_counts);
    }
    Shape m_data_shape;                   // static shape of the data input
    element::Type m_data_type;            // element type of the data input
    element::Type m_index_type;           // requested index element type (i32/i64)
    ov::Tensor m_input_data;              // input tensor contents
    ov::TensorVector m_expected_outputs = ov::TensorVector(4);  // the 4 expected outputs
    std::shared_ptr<op::v0::Constant> m_axis = nullptr;  // optional axis constant
    bool m_sorted;                        // value of the 'sorted' attribute
    std::string m_tested_case;            // human-readable label for the test name
};
// Parametrized reference test for opset10 Unique. Builds a tiny model around the op,
// feeds the input from UniqueParams and compares all four outputs against the
// hardcoded expected tensors.
class ReferenceUniqueLayerTest_NoAxis : public testing::TestWithParam<UniqueParams>, public CommonReferenceTest {
public:
    void SetUp() override {
        const auto& params = GetParam();
        function = CreateFunction(params);
        inputData = {params.m_input_data};
        refOutData = params.m_expected_outputs;
    }

    // Builds a readable, unique test name from the parameters.
    static std::string getTestCaseName(const testing::TestParamInfo<UniqueParams>& obj) {
        const auto& param = obj.param;
        std::ostringstream result;
        result << "data_shape=" << param.m_data_shape << "; ";
        result << "data_type=" << param.m_data_type << "; ";
        result << "index_type=" << param.m_index_type << "; ";
        result << "sorted=" << param.m_sorted << "; ";
        if (param.m_axis) {
            result << "axis=" << param.m_axis->cast_vector<int64_t>()[0] << "; ";
        }
        if (!param.m_tested_case.empty()) {
            result << "tested_case=" << param.m_tested_case << "; ";
        }
        return result.str();
    }

private:
    // Creates Parameter -> Unique (with or without the axis input) -> Model.
    static std::shared_ptr<Model> CreateFunction(const UniqueParams& params) {
        const auto in = std::make_shared<op::v0::Parameter>(params.m_data_type, params.m_data_shape);
        std::shared_ptr<Node> unique;
        if (params.m_axis) {
            unique = std::make_shared<op::v10::Unique>(in, params.m_axis, params.m_sorted, params.m_index_type);
        } else {
            unique = std::make_shared<op::v10::Unique>(in, params.m_sorted, params.m_index_type);
        }
        return std::make_shared<ov::Model>(unique, ParameterVector{in});
    }
};
// Runs the common reference-test flow (infer + validate) for every UniqueParams value.
TEST_P(ReferenceUniqueLayerTest_NoAxis, CompareWithHardcodedRefs) {
    Exec();
}
// Concatenates several vectors of test parameters into a single vector.
// Note: initializer_list elements are const, so the move iterators below actually
// copy; they are kept for interface compatibility with existing call sites.
template <typename T>
std::vector<T> flatten(std::initializer_list<std::vector<T>> test_cases) {
    using std::begin;
    using std::end;
    std::vector<T> flattened;
    // Reserve the exact total size up front to avoid repeated reallocations.
    size_t total_size = 0;
    for (const auto& tc : test_cases) {
        total_size += tc.size();
    }
    flattened.reserve(total_size);
    for (auto&& tc : test_cases) {
        flattened.insert(flattened.end(), std::make_move_iterator(begin(tc)), std::make_move_iterator(end(tc)));
    }
    return flattened;
}
// Generates Unique-10 test cases for integer data types.
//
// Each UniqueParams entry bundles (in order): the input shape, the input
// data, the expected unique values, the expected first-occurrence indices,
// the expected reverse indices (mapping each input element back into the
// unique-values output), the expected occurrence counts, an optional axis
// constant (nullptr means flattened/no-axis mode), the 'sorted' attribute,
// and an optional human-readable case label.
template <typename Data_t, typename Index_t>
std::vector<UniqueParams> params_unique_int() {
    static_assert(std::numeric_limits<Data_t>::is_integer, "Integer type expected");
    // scalar and 1D inputs, each exercised with sorted=false and sorted=true
    std::vector<UniqueParams> scalar_and_1D{UniqueParams{Shape{},
                                                         std::vector<Data_t>{1},
                                                         std::vector<Data_t>{1},
                                                         std::vector<Index_t>{0},
                                                         std::vector<Index_t>{0},
                                                         std::vector<int64_t>{1},
                                                         nullptr,
                                                         false},
                                            UniqueParams{Shape{},
                                                         std::vector<Data_t>{1},
                                                         std::vector<Data_t>{1},
                                                         std::vector<Index_t>{0},
                                                         std::vector<Index_t>{0},
                                                         std::vector<int64_t>{1},
                                                         nullptr,
                                                         true},
                                            UniqueParams{Shape{1},
                                                         std::vector<Data_t>{2},
                                                         std::vector<Data_t>{2},
                                                         std::vector<Index_t>{0},
                                                         std::vector<Index_t>{0},
                                                         std::vector<int64_t>{1},
                                                         nullptr,
                                                         false},
                                            UniqueParams{Shape{1},
                                                         std::vector<Data_t>{2},
                                                         std::vector<Data_t>{2},
                                                         std::vector<Index_t>{0},
                                                         std::vector<Index_t>{0},
                                                         std::vector<int64_t>{1},
                                                         nullptr,
                                                         true},
                                            UniqueParams{Shape{5},
                                                         std::vector<Data_t>{5, 4, 3, 2, 1},
                                                         std::vector<Data_t>{5, 4, 3, 2, 1},
                                                         std::vector<Index_t>{0, 1, 2, 3, 4},
                                                         std::vector<Index_t>{0, 1, 2, 3, 4},
                                                         std::vector<int64_t>{1, 1, 1, 1, 1},
                                                         nullptr,
                                                         false,
                                                         "1D no duplicates"},
                                            UniqueParams{Shape{5},
                                                         std::vector<Data_t>{5, 4, 3, 2, 1},
                                                         std::vector<Data_t>{1, 2, 3, 4, 5},
                                                         std::vector<Index_t>{4, 3, 2, 1, 0},
                                                         std::vector<Index_t>{4, 3, 2, 1, 0},
                                                         std::vector<int64_t>{1, 1, 1, 1, 1},
                                                         nullptr,
                                                         true,
                                                         "1D no duplicates"},
                                            UniqueParams{Shape{7},
                                                         std::vector<Data_t>{1, 3, 5, 3, 2, 4, 2},
                                                         std::vector<Data_t>{1, 3, 5, 2, 4},
                                                         std::vector<Index_t>{0, 1, 2, 4, 5},
                                                         std::vector<Index_t>{0, 1, 2, 1, 3, 4, 3},
                                                         std::vector<int64_t>{1, 2, 1, 2, 1},
                                                         nullptr,
                                                         false,
                                                         "1D with duplicates"},
                                            UniqueParams{Shape{7},
                                                         std::vector<Data_t>{1, 3, 5, 3, 2, 4, 2},
                                                         std::vector<Data_t>{1, 2, 3, 4, 5},
                                                         std::vector<Index_t>{0, 4, 1, 5, 2},
                                                         std::vector<Index_t>{0, 2, 4, 2, 1, 3, 1},
                                                         std::vector<int64_t>{1, 2, 2, 1, 1},
                                                         nullptr,
                                                         true,
                                                         "1D with duplicates"},
                                            UniqueParams{Shape{7},
                                                         std::vector<Data_t>{1, 3, 5, 3, 2, 4, 2},
                                                         std::vector<Data_t>{1, 2, 3, 4, 5},
                                                         std::vector<Index_t>{0, 4, 1, 5, 2},
                                                         std::vector<Index_t>{0, 2, 4, 2, 1, 3, 1},
                                                         std::vector<int64_t>{1, 2, 2, 1, 1},
                                                         make_axis(0),
                                                         true,
                                                         "1D with duplicates and axis"}};
    // 2D inputs, flattened mode as well as axis 0 and axis 1
    std::vector<UniqueParams> N_C_layout{UniqueParams{Shape{2, 6},
                                                      std::vector<Data_t>{3, 5, 3, 2, 4, 2, 1, 2, 3, 4, 5, 6},
                                                      std::vector<Data_t>{3, 5, 2, 4, 1, 6},
                                                      std::vector<Index_t>{0, 1, 3, 4, 6, 11},
                                                      std::vector<Index_t>{0, 1, 0, 2, 3, 2, 4, 2, 0, 3, 1, 5},
                                                      std::vector<int64_t>{3, 2, 3, 2, 1, 1},
                                                      nullptr,
                                                      false,
                                                      "2D no axis"},
                                         UniqueParams{Shape{2, 4},
                                                      std::vector<Data_t>{1, 2, 3, 4, 1, 2, 3, 5},
                                                      std::vector<Data_t>{1, 2, 3, 4, 1, 2, 3, 5},
                                                      std::vector<Index_t>{0, 1},
                                                      std::vector<Index_t>{0, 1},
                                                      std::vector<int64_t>{1, 1},
                                                      make_axis(0),
                                                      false,
                                                      "2D no duplicates"},
                                         UniqueParams{Shape{2, 4},
                                                      std::vector<Data_t>{1, 2, 3, 4, 1, 2, 3, 5},
                                                      std::vector<Data_t>{1, 2, 3, 4, 1, 2, 3, 5},
                                                      std::vector<Index_t>{0, 1, 2, 3},
                                                      std::vector<Index_t>{0, 1, 2, 3},
                                                      std::vector<int64_t>{1, 1, 1, 1},
                                                      make_axis(1),
                                                      false,
                                                      "2D no duplicates"},
                                         UniqueParams{Shape{2, 4},
                                                      std::vector<Data_t>{1, 2, 2, 4, 1, 2, 2, 5},
                                                      std::vector<Data_t>{1, 2, 4, 1, 2, 5},
                                                      std::vector<Index_t>{0, 1, 3},
                                                      std::vector<Index_t>{0, 1, 1, 2},
                                                      std::vector<int64_t>{1, 2, 1},
                                                      make_axis(1),
                                                      false,
                                                      "2D with duplicates"}};
    // 3D inputs with duplicated slices along each of the three axes
    std::vector<UniqueParams> N_D_layout{UniqueParams{Shape{2, 2, 3},
                                                      // 2 identical 2D slices over axis 0
                                                      std::vector<Data_t>{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6},
                                                      std::vector<Data_t>{1, 2, 3, 4, 5, 6},
                                                      std::vector<Index_t>{0},
                                                      std::vector<Index_t>{0, 0},
                                                      std::vector<int64_t>{2},
                                                      make_axis(0),
                                                      false,
                                                      "3D with duplicates"},
                                         UniqueParams{Shape{2, 2, 3},
                                                      // 2 identical 2D slices over axis 1
                                                      std::vector<Data_t>{6, 5, 4, 6, 5, 4, 3, 2, 1, 3, 2, 1},
                                                      std::vector<Data_t>{6, 5, 4, 3, 2, 1},
                                                      std::vector<Index_t>{0},
                                                      std::vector<Index_t>{0, 0},
                                                      std::vector<int64_t>{2},
                                                      make_axis(1),
                                                      false,
                                                      "3D with duplicates"},
                                         UniqueParams{Shape{2, 2, 3},
                                                      // the first and the last slice over axis 2 are equal
                                                      std::vector<Data_t>{-1, 2, -1, 5, -3, 5, 7, -8, 7, 4, 4, 4},
                                                      std::vector<Data_t>{-1, 2, 5, -3, 7, -8, 4, 4},
                                                      std::vector<Index_t>{0, 1},
                                                      std::vector<Index_t>{0, 1, 0},
                                                      std::vector<int64_t>{2, 1},
                                                      make_axis(2),
                                                      false,
                                                      "3D with duplicates(1 & 3)"},
                                         UniqueParams{Shape{2, 2, 3},
                                                      // the first and the second slice over axis 2 are equal
                                                      std::vector<Data_t>{-1, -1, 2, 5, 5, -3, 7, 7, -8, 4, 4, 4},
                                                      std::vector<Data_t>{-1, 2, 5, -3, 7, -8, 4, 4},
                                                      std::vector<Index_t>{0, 2},
                                                      std::vector<Index_t>{0, 0, 1},
                                                      std::vector<int64_t>{2, 1},
                                                      make_axis(2),
                                                      false,
                                                      "3D with duplicates (1 & 2)"}};
    return flatten({std::move(scalar_and_1D), std::move(N_C_layout), std::move(N_D_layout)});
}
// Generates Unique-10 test cases for floating-point data types.
//
// Field order within each UniqueParams entry: input shape, input data,
// expected unique values, expected first-occurrence indices, expected
// reverse indices, expected counts, optional axis constant (nullptr means
// flattened/no-axis mode), the 'sorted' attribute, and an optional case
// label. Covers scalar, single-element and 1D inputs, sorted and unsorted.
template <typename Data_t, typename Index_t>
std::vector<UniqueParams> params_unique_float() {
    static_assert(!std::numeric_limits<Data_t>::is_integer, "Floating point type expected");
    // just some fancy numbers to be used in the input tensors
    const auto sq2 = Data_t{1.4142135};
    const auto sq3 = Data_t{1.7320508075};
    const auto e = Data_t{2.71828};
    const auto pi = Data_t{3.141592};
    const std::vector<UniqueParams> params{UniqueParams{Shape{},
                                                        std::vector<Data_t>{pi},
                                                        std::vector<Data_t>{pi},
                                                        std::vector<Index_t>{0},
                                                        std::vector<Index_t>{0},
                                                        std::vector<int64_t>{1},
                                                        nullptr,
                                                        false},
                                           UniqueParams{Shape{},
                                                        std::vector<Data_t>{pi},
                                                        std::vector<Data_t>{pi},
                                                        std::vector<Index_t>{0},
                                                        std::vector<Index_t>{0},
                                                        std::vector<int64_t>{1},
                                                        nullptr,
                                                        true},
                                           UniqueParams{Shape{1},
                                                        std::vector<Data_t>{-e},
                                                        std::vector<Data_t>{-e},
                                                        std::vector<Index_t>{0},
                                                        std::vector<Index_t>{0},
                                                        std::vector<int64_t>{1},
                                                        nullptr,
                                                        false},
                                           UniqueParams{Shape{1},
                                                        std::vector<Data_t>{-e},
                                                        std::vector<Data_t>{-e},
                                                        std::vector<Index_t>{0},
                                                        std::vector<Index_t>{0},
                                                        std::vector<int64_t>{1},
                                                        nullptr,
                                                        true},
                                           UniqueParams{Shape{6},
                                                        std::vector<Data_t>{pi, -pi, -e, e, sq3, sq2},
                                                        std::vector<Data_t>{pi, -pi, -e, e, sq3, sq2},
                                                        std::vector<Index_t>{0, 1, 2, 3, 4, 5},
                                                        std::vector<Index_t>{0, 1, 2, 3, 4, 5},
                                                        std::vector<int64_t>{1, 1, 1, 1, 1, 1},
                                                        nullptr,
                                                        false,
                                                        "1D no duplicates"},
                                           UniqueParams{Shape{6},
                                                        std::vector<Data_t>{pi, -pi, -e, e, sq3, sq2},
                                                        std::vector<Data_t>{-pi, -e, sq2, sq3, e, pi},
                                                        std::vector<Index_t>{1, 2, 5, 4, 3, 0},
                                                        std::vector<Index_t>{5, 0, 1, 4, 3, 2},
                                                        std::vector<int64_t>{1, 1, 1, 1, 1, 1},
                                                        nullptr,
                                                        true,
                                                        "1D no duplicates"}};
    return params;
}
// Registers the suite for every supported combination of data type
// (fp16/bf16/f32/f64 and i8/i16/i32/i64) with both index types (i32/i64).
INSTANTIATE_TEST_SUITE_P(smoke_ReferenceUniqueLayerTest_NoAxis,
                         ReferenceUniqueLayerTest_NoAxis,
                         ::testing::ValuesIn(flatten({params_unique_float<float16, int32_t>(),
                                                      params_unique_float<float16, int64_t>(),
                                                      params_unique_float<bfloat16, int32_t>(),
                                                      params_unique_float<bfloat16, int64_t>(),
                                                      params_unique_float<float, int32_t>(),
                                                      params_unique_float<float, int64_t>(),
                                                      params_unique_float<double, int32_t>(),
                                                      params_unique_float<double, int64_t>(),
                                                      params_unique_int<int16_t, int32_t>(),
                                                      params_unique_int<int8_t, int64_t>(),
                                                      params_unique_int<int8_t, int32_t>(),
                                                      params_unique_int<int16_t, int64_t>(),
                                                      params_unique_int<int32_t, int32_t>(),
                                                      params_unique_int<int32_t, int64_t>(),
                                                      params_unique_int<int64_t, int32_t>(),
                                                      params_unique_int<int64_t, int64_t>()})),
                         ReferenceUniqueLayerTest_NoAxis::getTestCaseName);
} // namespace

View File

@@ -109,7 +109,7 @@ void ReferenceCNNTest::Validate() {
outputs_legacy.emplace_back(element::f32, result->get_shape(), outData);
}
for (size_t i = 0; i < outputs_legacy.size(); i++) {
CommonReferenceTest::ValidateBlobs(outputs_legacy[i], outputs_ov20[i], threshold, abs_threshold);
CommonReferenceTest::ValidateBlobs(outputs_legacy[i], outputs_ov20[i], i, threshold, abs_threshold);
}
}