diff --git a/src/core/reference/include/ngraph/runtime/reference/unique.hpp b/src/core/reference/include/ngraph/runtime/reference/unique.hpp new file mode 100644 index 00000000000..3573961f97c --- /dev/null +++ b/src/core/reference/include/ngraph/runtime/reference/unique.hpp @@ -0,0 +1,323 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "gather.hpp" +#include "ngraph/coordinate_index.hpp" +#include "ngraph/coordinate_transform.hpp" +#include "ngraph/shape.hpp" + +namespace ngraph { +namespace runtime { +namespace reference { + +enum class DescriptorType { SINGLE_VALUE, SLICE }; + +template +struct TensorSlice { + TensorSlice(const Index_t idx_, const DescriptorType descriptor_type_) + : idx{idx_}, + descriptor_type{descriptor_type_} {} + TensorSlice(const Index_t idx_, const Index_t rev_idx_, const Count_t count_) + : idx{idx_}, + rev_idx{rev_idx_}, + count{count_} {} + /// The index of the current element in the original input tensor. It never changes even if the elements get + /// sorted. This value is used as a mapping between a unique element in the first output tensor and the position + /// of this element in the original input tensor. + Index_t idx = 0; + /// The rev_idx is a mapping between every element in the original input and the location of a unique element + /// in the first output tensor. More than one Element can have the same rev_idx. + Index_t rev_idx = -1; + /// The number of occurrences of a given element in the input tensor. This value is different than one only for + /// duplicates found in the input tensor. + Count_t count = 1; + /// Indicates if this object points to a single value in the input tensor (rather than a slice of the tensor) + DescriptorType descriptor_type = DescriptorType::SINGLE_VALUE; +}; + +template +struct UniqueElements { + /// Contains descriptors of all elements in the input tensor. Possibly sorted by value. 
+ std::vector> all_tensor_elements; + /// Subset of all tensor elements. First occurrences of the unique values. + std::vector> unique_tensor_elements; + /// Axis (optional). Used to gather unique elements over a given dimension. + int64_t axis = 0; +}; + +namespace { + +// Generates descriptors of slices or individual elems of the input tensor. This function returns a vector of +// helper objects representing elements that the "unique" algorithm is supposed to process later. +template +std::vector> generate_descriptors(const size_t count, const DescriptorType type) { + std::vector> descriptors; + descriptors.reserve(count); + + for (Index_t i = 0; i < count; ++i) { + descriptors.emplace_back(i, type); + } + + return descriptors; +} + +// Returns indices of the first element of each tensor slice. The index is equal to a coordinate index. +template +inline std::pair first_elems_of_both_slices(const TensorSlice& lhs, + const TensorSlice& rhs, + const std::vector& data_shape_strides, + const int64_t axis) { + return {data_shape_strides[axis] * lhs.idx, data_shape_strides[axis] * rhs.idx}; +} + +template +inline size_t calc_slices_offset(const TensorSlice& lhs, + const TensorSlice& rhs, + const std::vector& data_shape_strides, + const int64_t axis) { + const auto first_elem_indices = first_elems_of_both_slices(lhs, rhs, data_shape_strides, axis); + if (first_elem_indices.first > first_elem_indices.second) { + return first_elem_indices.first - first_elem_indices.second; + } else { + return first_elem_indices.second - first_elem_indices.first; + } +} + +inline Shape slice_shape_to_iterate(Shape data_shape, const int64_t axis) { + data_shape.erase(data_shape.begin() + axis, data_shape.begin() + axis + 1); + return data_shape; +} + +bool scalar_or_single_element(const Shape& s) { + return std::all_of(std::begin(s), std::end(s), [](Shape::value_type d) { + return d == 1; + }); +} +} // namespace + +template +UniqueElements find_unique_elements(const Data_t* data, + const 
Shape& data_shape, + std::unique_ptr axis, + const bool sorted) { + using std::begin; + using std::end; + + const auto data_shape_strides = ngraph::row_major_strides(data_shape); + + const auto ascending_order = [&data](const TensorSlice& lhs, + const TensorSlice& rhs) { + return *(data + lhs.idx) < *(data + rhs.idx); + }; + + const auto slices_ascending_order = [&](const TensorSlice& lhs, + const TensorSlice& rhs) { + const auto slices_offset = calc_slices_offset(lhs, rhs, data_shape_strides, *axis); + const auto shape_to_iterate = slice_shape_to_iterate(data_shape, *axis); + + for (auto it = CoordinateIterator(shape_to_iterate); it != CoordinateIterator::end(); ++it) { + auto elem_coord = *it; + elem_coord.insert(elem_coord.cbegin() + *axis, lhs.idx); + const auto lhs_elem_idx = ngraph::coordinate_index(elem_coord, data_shape); + const auto rhs_elem_idx = lhs_elem_idx + slices_offset; + if (*(data + rhs_elem_idx) > *(data + lhs_elem_idx)) { + return false; + } + } + + return true; + }; + + const auto elements_are_equal = [&data](const TensorSlice& lhs, + const TensorSlice& rhs) { + return *(data + lhs.idx) == *(data + rhs.idx); + }; + + const auto slices_are_equal = [&](const TensorSlice& lhs, + const TensorSlice& rhs) { + const auto& slice_with_lower_idx = + std::min(lhs, rhs, [](const TensorSlice& a, const TensorSlice& b) { + return a.idx < b.idx; + }); + + // the individual elements in the two compared slices are always separated by the same offset + // and this can be used to compare them elementwise + const auto slices_offset = calc_slices_offset(lhs, rhs, data_shape_strides, *axis); + const auto shape_to_iterate = slice_shape_to_iterate(data_shape, *axis); + + for (auto it = CoordinateIterator(shape_to_iterate); it != CoordinateIterator::end(); ++it) { + // All slice elements have a "slice index" constant value at the axis position, only the other dimensions + // vary for each slice element. 
Those dimensions are provided by CoordinateIterator, the value at axis + // needs to be injected manually. + auto elem_coord = *it; + elem_coord.insert(elem_coord.cbegin() + *axis, slice_with_lower_idx.idx); + const auto lhs_elem_idx = ngraph::coordinate_index(elem_coord, data_shape); + const auto rhs_elem_idx = lhs_elem_idx + slices_offset; + if (*(data + lhs_elem_idx) != *(data + rhs_elem_idx)) { + return false; + } + } + return true; + }; + + const auto already_unique = [&elements_are_equal](const TensorSlice& existing_unique_elem) { + return [&elements_are_equal, &existing_unique_elem](const TensorSlice& x) { + return elements_are_equal(existing_unique_elem, x); + }; + }; + + const auto already_unique_slice = [&slices_are_equal](const TensorSlice& existing_unique_elem) { + return [&slices_are_equal, &existing_unique_elem](const TensorSlice& x) { + return slices_are_equal(existing_unique_elem, x); + }; + }; + + UniqueElements ret; + + if (scalar_or_single_element(data_shape)) { + ret.all_tensor_elements.emplace_back(0, 0, 1); + ret.unique_tensor_elements.emplace_back(0, 0, 1); + return ret; + } else if (!axis || (is_vector(data_shape) && data_shape[0] > 1)) { // 1D or N-D without any axis + const auto data_elems_count = shape_size(data_shape); + ret.all_tensor_elements = + generate_descriptors(data_elems_count, DescriptorType::SINGLE_VALUE); + + if (sorted) { + std::sort(begin(ret.all_tensor_elements), end(ret.all_tensor_elements), ascending_order); + } + + ret.all_tensor_elements[0].rev_idx = 0; + ret.unique_tensor_elements.push_back(ret.all_tensor_elements[0]); + for (size_t i = 1; i < data_elems_count; ++i) { + auto& tensor_element = ret.all_tensor_elements[i]; + auto existing_unique = end(ret.unique_tensor_elements); + if (sorted) { + existing_unique = std::lower_bound(begin(ret.unique_tensor_elements), + end(ret.unique_tensor_elements), + tensor_element, + ascending_order); + } else { + existing_unique = std::find_if(begin(ret.unique_tensor_elements), + 
end(ret.unique_tensor_elements), + already_unique(tensor_element)); + } + + if (existing_unique != end(ret.unique_tensor_elements)) { + tensor_element.rev_idx = existing_unique->rev_idx; + existing_unique->count++; + } else { + tensor_element.rev_idx = ret.unique_tensor_elements.size(); + ret.unique_tensor_elements.push_back(tensor_element); + } + } + } else { + ret.axis = *axis; + ret.all_tensor_elements = generate_descriptors(data_shape[*axis], DescriptorType::SLICE); + + if (sorted) { + std::sort(begin(ret.all_tensor_elements), end(ret.all_tensor_elements), slices_ascending_order); + } + + ret.all_tensor_elements[0].rev_idx = 0; + ret.unique_tensor_elements.push_back(ret.all_tensor_elements[0]); + + for (size_t i = 1; i < data_shape[*axis]; ++i) { + auto& tensor_element = ret.all_tensor_elements[i]; + auto existing_unique = end(ret.unique_tensor_elements); + if (sorted) { + existing_unique = std::lower_bound(begin(ret.unique_tensor_elements), + end(ret.unique_tensor_elements), + tensor_element, + slices_ascending_order); + } else { + existing_unique = std::find_if(begin(ret.unique_tensor_elements), + end(ret.unique_tensor_elements), + already_unique_slice(tensor_element)); + } + + if (existing_unique != end(ret.unique_tensor_elements)) { + tensor_element.rev_idx = existing_unique->rev_idx; + existing_unique->count++; + } else { + tensor_element.rev_idx = ret.unique_tensor_elements.size(); + ret.unique_tensor_elements.push_back(tensor_element); + } + } + } + + return ret; +} + +template +std::tuple make_tensor_shapes(const UniqueElements& unique_elements, + const Shape& data_shape, + std::unique_ptr axis) { + if (axis) { + // if the axis was specified we need to return a data shape with a modified dimension-at-axis + // this is where we need to insert the number of detected unique elements + // all other dimensions stay the same as in the original data_shape + auto output0 = data_shape; + output0[*axis] = unique_elements.unique_tensor_elements.size(); + const 
auto output1_3 = Shape{unique_elements.unique_tensor_elements.size()}; + const auto output2 = Shape{data_shape[*axis]}; + return std::make_tuple(output0, output1_3, output2); + } else { + const auto output0 = Shape{unique_elements.unique_tensor_elements.size()}; + const auto output1_3 = output0; + const auto output2 = Shape{unique_elements.all_tensor_elements.size()}; + return std::make_tuple(output0, output1_3, output2); + } +} + +template +void unique(Data_t* out_unique_elements, + Index_t* out_indices, + Index_t* out_rev_indices, + Count_t* out_counts, + const Data_t* data, + const Shape& data_shape, + const Shape& out_shape, + const UniqueElements& descriptors) { + if (descriptors.unique_tensor_elements[0].descriptor_type == DescriptorType::SINGLE_VALUE) { + for (size_t i = 0; i < descriptors.unique_tensor_elements.size(); ++i) { + const auto& descriptor = descriptors.unique_tensor_elements[i]; + out_unique_elements[i] = *(data + descriptor.idx); + out_indices[i] = descriptor.idx; + out_counts[i] = descriptor.count; + } + } else { + std::vector indices; + indices.reserve(descriptors.unique_tensor_elements.size()); + + for (size_t i = 0; i < descriptors.unique_tensor_elements.size(); ++i) { + const auto& descriptor = descriptors.unique_tensor_elements[i]; + out_indices[i] = descriptor.idx; + out_counts[i] = descriptor.count; + + indices.push_back(descriptor.idx); + } + + ngraph::runtime::reference::gather(data, + indices.data(), + out_unique_elements, + data_shape, + Shape{descriptors.unique_tensor_elements.size()}, + out_shape, + descriptors.axis); + } + + // filling out this output tensor requires a separate pass over all elements of the input tensor + // for each input element we need to output an index of that element in the first output tensor + // additionally if sorting was involved the "all_tensor_elements" might be ordered differently than the elements + // in the original input tensor - this is why descriptor.idx is used for indexing the output tensor
below + for (const auto& descriptor : descriptors.all_tensor_elements) { + out_rev_indices[descriptor.idx] = descriptor.rev_idx; + } +} +} // namespace reference +} // namespace runtime +} // namespace ngraph diff --git a/src/core/src/op/unique.cpp b/src/core/src/op/unique.cpp index a986c341c41..1edab442abf 100644 --- a/src/core/src/op/unique.cpp +++ b/src/core/src/op/unique.cpp @@ -5,6 +5,7 @@ #include "openvino/op/unique.hpp" #include "itt.hpp" +#include "ngraph/runtime/reference/unique.hpp" #include "ngraph/validation_util.hpp" #include "openvino/op/util/op_types.hpp" @@ -14,6 +15,80 @@ int64_t extract_axis(const std::shared_ptr& axis_constant) { const auto axis_vec = axis_constant->cast_vector(); return axis_vec.at(0); } + +template +ngraph::runtime::reference::UniqueElements call_unique(const Tensor& input, + std::unique_ptr axis, + const bool sorted) { + return ngraph::runtime::reference::find_unique_elements(input.data(), + input.get_shape(), + std::move(axis), + sorted); +} + +std::tuple calculate_static_output_shapes(const Tensor& input_data, const op::v10::Unique& op) { + using Index_t = int32_t; + using Counts_t = int32_t; + + const auto maybe_extract_axis = [&op]() { + std::unique_ptr axis; + if (op.get_input_size() == 2 && ov::op::util::is_constant(op.input_value(1).get_node())) { + const auto axis_constant = + std::dynamic_pointer_cast(op.input_value(1).get_node_shared_ptr()); + axis = std::unique_ptr(new int64_t{extract_axis(axis_constant)}); + } + return axis; + }; + + ngraph::runtime::reference::UniqueElements unique_elements; + std::unique_ptr axis = maybe_extract_axis(); + + switch (op.get_input_element_type(0)) { + case element::boolean: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::i8: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::i16: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case 
element::i32: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::i64: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::u8: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::u16: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::u32: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::u64: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::bf16: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::f16: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::f32: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + case element::f64: + unique_elements = call_unique(input_data, std::move(axis), op.get_sorted()); + break; + } + + return ngraph::runtime::reference::make_tensor_shapes(unique_elements, + input_data.get_shape(), + maybe_extract_axis()); +} } // namespace op::v10::Unique::Unique(const Output& data, const bool sorted, const element::Type& index_element_type) @@ -57,57 +132,75 @@ void op::v10::Unique::validate_and_infer_types() { output_shapes[0] = PartialShape::dynamic(); output_shapes[1] = input_tensor_capacity > 0 ? PartialShape{{1, input_tensor_capacity}} : PartialShape{{Dimension::dynamic()}}; - output_shapes[2] = output_shapes[1]; + output_shapes[2] = + input_tensor_capacity > 0 ? 
PartialShape{{input_tensor_capacity}} : PartialShape{{Dimension::dynamic()}}; output_shapes[3] = output_shapes[1]; - if (get_input_size() == 2) { - NODE_VALIDATION_CHECK( - this, - get_input_element_type(1) == element::i32 || get_input_element_type(1) == element::i64, - "The allowed element types of the 'axis' input tensor of the Unique operator are i32 and i64."); + if (ov::op::util::is_constant(input_value(0).get_node())) { + const auto input_const = std::dynamic_pointer_cast(input_value(0).get_node_shared_ptr()); + ov::Tensor input_data = ov::Tensor(input_const->get_element_type(), input_const->get_shape()); + memcpy(input_data.data(), input_const->get_data_ptr(), input_data.get_byte_size()); + const auto tensor_shapes = calculate_static_output_shapes(input_data, *this); - NODE_VALIDATION_CHECK( - this, - get_input_partial_shape(1) == Shape{} || get_input_partial_shape(1) == Shape{1}, - "The 'axis' input tensor of the Unique operator must be a scalar or 1D tensor with 1 element."); + output_shapes[0] = std::get<0>(tensor_shapes); + output_shapes[1] = std::get<1>(tensor_shapes); + output_shapes[2] = std::get<2>(tensor_shapes); + output_shapes[3] = std::get<1>(tensor_shapes); + } else { + if (get_input_size() == 2) { + NODE_VALIDATION_CHECK( + this, + get_input_element_type(1) == element::i32 || get_input_element_type(1) == element::i64, + "The allowed element types of the 'axis' input tensor of the Unique operator are i32 and i64."); - NODE_VALIDATION_CHECK(this, - ov::op::util::is_constant(input_value(1).get_node()), - "The 'axis' input of the Unique operator must be connected to a Constant."); - const int64_t axis = - extract_axis(std::dynamic_pointer_cast(input_value(1).get_node_shared_ptr())); + NODE_VALIDATION_CHECK( + this, + get_input_partial_shape(1) == Shape{} || get_input_partial_shape(1) == Shape{1}, + "The 'axis' input tensor of the Unique operator must be a scalar or 1D tensor with 1 element."); - if (input_shape.rank().is_static()) { - const auto 
normalized_axis = ngraph::normalize_axis(this, axis, input_shape.rank()); - const auto dim_at_axis = input_shape[normalized_axis]; + NODE_VALIDATION_CHECK(this, + ov::op::util::is_constant(input_value(1).get_node()), + "The 'axis' input of the Unique operator must be connected to a Constant."); + const int64_t axis = + extract_axis(std::dynamic_pointer_cast(input_value(1).get_node_shared_ptr())); - Dimension output_dim_at_axis; - if (dim_at_axis.is_dynamic()) { - if (dim_at_axis == Dimension::dynamic()) { - output_dim_at_axis = dim_at_axis; + if (input_shape.rank().is_static()) { + const auto normalized_axis = ngraph::normalize_axis(this, axis, input_shape.rank()); + const auto dim_at_axis = input_shape[normalized_axis]; + + Dimension output_dim_at_axis; + Dimension rev_idx_size; + if (dim_at_axis.is_dynamic()) { + if (dim_at_axis == Dimension::dynamic()) { + output_dim_at_axis = dim_at_axis; + } else { + output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()}; + } + rev_idx_size = dim_at_axis; + } else if (dim_at_axis.get_length() == 0) { + output_dim_at_axis = Dimension{0}; + output_shapes[1] = PartialShape{{0}}; + rev_idx_size = output_dim_at_axis; + output_shapes[3] = PartialShape{{0}}; } else { output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()}; + rev_idx_size = Dimension{dim_at_axis.get_max_length()}; } - } else if (dim_at_axis.get_length() == 0) { - output_dim_at_axis = Dimension{0}; - output_shapes[1] = PartialShape{{0}}; - output_shapes[2] = PartialShape{{0}}; - output_shapes[3] = PartialShape{{0}}; - } else { - output_dim_at_axis = Dimension{1, dim_at_axis.get_max_length()}; - } - auto output_shape = input_shape; - output_shape[normalized_axis] = output_dim_at_axis; - output_shapes[0] = output_shape; - } - } else { - // no axis => flattened input tensor - if (input_shape.is_static()) { - // between 1 and the total number of input tensor's unique elements - output_shapes[0] = PartialShape{{Dimension{1, input_tensor_capacity}}}; + auto 
output_shape = input_shape; + output_shape[normalized_axis] = output_dim_at_axis; + output_shapes[0] = output_shape; + + output_shapes[2] = PartialShape{rev_idx_size}; + } } else { - output_shapes[0] = PartialShape{{Dimension::dynamic()}}; + // no axis => flattened input tensor + if (input_shape.is_static()) { + // between 1 and the total number of input tensor's elements + output_shapes[0] = PartialShape{{Dimension{1, input_tensor_capacity}}}; + } else { + output_shapes[0] = PartialShape{{Dimension::dynamic()}}; + } } } diff --git a/src/core/tests/type_prop/unique.cpp b/src/core/tests/type_prop/unique.cpp index f613da71d32..660765bf0fa 100644 --- a/src/core/tests/type_prop/unique.cpp +++ b/src/core/tests/type_prop/unique.cpp @@ -36,7 +36,7 @@ TEST(type_prop, unique_no_axis_3d) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}}); CHECK_OUTPUT_SHAPES(unique, - {{PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}}); + {{PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{16}}, PartialShape{{1, 16}}}}); } TEST(type_prop, unique_no_axis_3d_index_type_i32) { @@ -45,7 +45,7 @@ TEST(type_prop, unique_no_axis_3d_index_type_i32) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i32, element::i32, element::i64}}); CHECK_OUTPUT_SHAPES(unique, - {{PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{1, 9}}}}); + {{PartialShape{{1, 9}}, PartialShape{{1, 9}}, PartialShape{{9}}, PartialShape{{1, 9}}}}); } TEST(type_prop, unique_no_axis_scalar) { @@ -72,7 +72,7 @@ TEST(type_prop, unique_3d_scalar_axis) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}}); CHECK_OUTPUT_SHAPES( unique, - {{PartialShape{{2}, {1, 4}, {2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}}); + {{PartialShape{{2}, {1, 4}, {2}}, PartialShape{{1, 16}}, PartialShape{{4}}, PartialShape{{1, 16}}}}); } TEST(type_prop, unique_3d_axis_1d) 
{ @@ -83,7 +83,7 @@ TEST(type_prop, unique_3d_axis_1d) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}}); CHECK_OUTPUT_SHAPES( unique, - {{PartialShape{{2}, {4}, {1, 2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}}); + {{PartialShape{{2}, {4}, {1, 2}}, PartialShape{{1, 16}}, PartialShape{{2}}, PartialShape{{1, 16}}}}); } TEST(type_prop, unique_3d_negative_axis) { @@ -94,7 +94,7 @@ TEST(type_prop, unique_3d_negative_axis) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}}); CHECK_OUTPUT_SHAPES( unique, - {{PartialShape{{1, 2}, {4}, {2}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}, PartialShape{{1, 16}}}}); + {{PartialShape{{1, 2}, {4}, {2}}, PartialShape{{1, 16}}, PartialShape{{2}}, PartialShape{{1, 16}}}}); } TEST(type_prop, unique_dynamic_dim_at_axis) { @@ -115,7 +115,7 @@ TEST(type_prop, unique_dim_with_intervals_at_axis) { CHECK_ELEMENT_TYPES(unique, {{element::f32, element::i64, element::i64, element::i64}}); CHECK_OUTPUT_SHAPES( unique, - {{PartialShape{{2}, {1, 10}, {2}}, PartialShape{{-1}}, PartialShape{{-1}}, PartialShape{{-1}}}}); + {{PartialShape{{2}, {1, 10}, {2}}, PartialShape{{-1}}, PartialShape{{2, 10}}, PartialShape{{-1}}}}); } TEST(type_prop, unique_dynamic_rank) { @@ -197,3 +197,10 @@ TEST(type_prop, unique_with_zero_dimension) { CHECK_OUTPUT_SHAPES(unique, {{PartialShape{{1, 0, 2}}, PartialShape{{0}}, PartialShape{{0}}, PartialShape{{0}}}}); } + +TEST(type_prop, unique_with_constant_input_no_axis) { + const auto data = opset10::Constant::create(element::i32, Shape{5}, {5, 1, 4, 2, 5}); + const auto unique = make_shared(data); + + CHECK_OUTPUT_SHAPES(unique, {{Shape{{4}}, Shape{{4}}, Shape{{5}}, Shape{{4}}}}); +} diff --git a/src/plugins/template/backend/evaluates_map.cpp b/src/plugins/template/backend/evaluates_map.cpp index 83f5a9281ee..5db5cc3c645 100644 --- a/src/plugins/template/backend/evaluates_map.cpp +++ 
b/src/plugins/template/backend/evaluates_map.cpp @@ -87,6 +87,7 @@ #include #include #include +#include #include #include "backend.hpp" @@ -4200,6 +4201,63 @@ bool evaluate(const shared_ptr& op, const HostTensorVector& ou return true; } +template +void execute_unique(const HostTensorVector& outputs, + const HostTensorVector& inputs, + const shared_ptr& op) { + const auto maybe_extract_axis = [&op]() { + std::unique_ptr axis; + if (op->get_input_size() == 2 && ov::op::util::is_constant(op->input_value(1).get_node())) { + const auto axis_constant = + std::dynamic_pointer_cast(op->input_value(1).get_node_shared_ptr()); + const auto axis_vec = axis_constant->cast_vector(); + axis = std::unique_ptr(new int64_t{axis_vec.at(0)}); + } + return axis; + }; + + const auto unique_elements = + runtime::reference::find_unique_elements(inputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + maybe_extract_axis(), + op->get_sorted()); + const auto tensor_shapes = + runtime::reference::make_tensor_shapes(unique_elements, inputs[0]->get_shape(), maybe_extract_axis()); + + auto& out_unique_elements = outputs[0]; + auto& out_indices = outputs[1]; + auto& out_rev_indices = outputs[2]; + auto& out_counts = outputs[3]; + + out_unique_elements->set_shape(std::get<0>(tensor_shapes)); + out_indices->set_shape(std::get<1>(tensor_shapes)); + out_rev_indices->set_shape(std::get<2>(tensor_shapes)); + out_counts->set_shape(std::get<1>(tensor_shapes)); + + runtime::reference::unique(out_unique_elements->get_data_ptr(), + out_indices->get_data_ptr(), + out_rev_indices->get_data_ptr(), + out_counts->get_data_ptr(), + inputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + std::get<0>(tensor_shapes), + unique_elements); +} + +template +bool evaluate(const shared_ptr& op, const HostTensorVector& outputs, const HostTensorVector& inputs) { + using Data_t = typename element_type_traits::value_type; + if (op->get_index_element_type() == element::i32) { + execute_unique(outputs, inputs, op); + } else if 
(op->get_index_element_type() == element::i64) { + execute_unique(outputs, inputs, op); + } else { + return false; + } + + return true; +} + template bool evaluate(const shared_ptr& op, const HostTensorVector& outputs, diff --git a/src/plugins/template/backend/opset_int_tbl.hpp b/src/plugins/template/backend/opset_int_tbl.hpp index 5cfba92f21f..dc02cdd1c82 100644 --- a/src/plugins/template/backend/opset_int_tbl.hpp +++ b/src/plugins/template/backend/opset_int_tbl.hpp @@ -144,6 +144,7 @@ _OPENVINO_OP_REG(MulticlassNms, op::v9) _OPENVINO_OP_REG(IsFinite, op::v10) _OPENVINO_OP_REG(IsInf, op::v10) _OPENVINO_OP_REG(IsNaN, op::v10) +_OPENVINO_OP_REG(Unique, op::v10) _OPENVINO_OP_REG(AUGRUCell, ov::op::internal) _OPENVINO_OP_REG(AUGRUSequence, ov::op::internal) diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp index 0c7226e1a37..cd040b80fd1 100644 --- a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp +++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp @@ -71,14 +71,16 @@ void CommonReferenceTest::Validate() { ASSERT_EQ(refOutData.size(), actualOutData.size()); for (size_t i = 0; i < refOutData.size(); i++) { - ValidateBlobs(refOutData[i], actualOutData[i], threshold, abs_threshold, actual_comparision_size); + ValidateBlobs(refOutData[i], actualOutData[i], i, threshold, abs_threshold, actual_comparision_size); } } -void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, const ov::Tensor& outBlob, +void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob, const ov::Tensor& outBlob, const size_t blob_idx, float threshold, float abs_threshold, size_t actual_comparision_size) { - ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type()); - ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size()); + ASSERT_EQ(refBlob.get_element_type(), outBlob.get_element_type()) + << 
"Incompatible element type for blob with index " << blob_idx; + ASSERT_EQ(refBlob.get_byte_size(), outBlob.get_byte_size()) + << "Incorrect byte size for blob with index " << blob_idx; if (actual_comparision_size == 0) actual_comparision_size = refBlob.get_size(); diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp index 73bc2717700..69b74badb31 100644 --- a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp +++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp @@ -24,6 +24,7 @@ public: static void ValidateBlobs(const ov::Tensor& refBlob, const ov::Tensor& outBlob, + const size_t blob_idx, float threshold, float abs_threshold, size_t actual_comparision_size = 0); diff --git a/src/plugins/template/tests/functional/op_reference/unique.cpp b/src/plugins/template/tests/functional/op_reference/unique.cpp new file mode 100644 index 00000000000..a867cbfa74e --- /dev/null +++ b/src/plugins/template/tests/functional/op_reference/unique.cpp @@ -0,0 +1,357 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/unique.hpp" + +#include + +#include "base_reference_test.hpp" +#include "openvino/op/constant.hpp" + +using namespace reference_tests; +using namespace ov; + +namespace { + +std::shared_ptr make_axis(const int64_t axis, const element::Type& et = element::i32) { + return op::v0::Constant::create(et, Shape{}, {axis}); +} + +struct UniqueParams { + template + UniqueParams(const Shape& data_shape, + const std::vector& input_data, + const std::vector& expected_unique_values, + const std::vector& expected_indices, + const std::vector& expected_rev_indices, + const std::vector& expected_counts, + std::shared_ptr axis_descritptor = nullptr, + const bool sorted = true, + const std::string& tested_case = "") + : m_data_shape{data_shape}, + 
m_data_type{element::from()}, + m_index_type{element::from()}, + m_input_data{CreateTensor(m_data_type, input_data)}, + m_axis{axis_descritptor}, + m_sorted{sorted}, + m_tested_case{tested_case} { + m_expected_outputs[0] = CreateTensor(m_data_type, expected_unique_values); + m_expected_outputs[1] = CreateTensor(m_index_type, expected_indices); + m_expected_outputs[2] = CreateTensor(m_index_type, expected_rev_indices); + m_expected_outputs[3] = CreateTensor(element::i64, expected_counts); + } + + Shape m_data_shape; + element::Type m_data_type; + element::Type m_index_type; + ov::Tensor m_input_data; + ov::TensorVector m_expected_outputs = ov::TensorVector(4); + std::shared_ptr m_axis = nullptr; + bool m_sorted; + std::string m_tested_case; +}; + +class ReferenceUniqueLayerTest_NoAxis : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + const auto& params = GetParam(); + function = CreateFunction(params); + inputData = {params.m_input_data}; + refOutData = params.m_expected_outputs; + } + + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + const auto& param = obj.param; + std::ostringstream result; + + result << "data_shape=" << param.m_data_shape << "; "; + result << "data_type=" << param.m_data_type << "; "; + result << "index_type=" << param.m_index_type << "; "; + result << "sorted=" << param.m_sorted << "; "; + if (param.m_axis) { + result << "axis=" << param.m_axis->cast_vector()[0] << "; "; + } + if (!param.m_tested_case.empty()) { + result << "tested_case=" << param.m_tested_case << "; "; + } + + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const UniqueParams& params) { + const auto in = std::make_shared(params.m_data_type, params.m_data_shape); + std::shared_ptr unique; + if (params.m_axis) { + unique = std::make_shared(in, params.m_axis, params.m_sorted, params.m_index_type); + } else { + unique = std::make_shared(in, params.m_sorted, params.m_index_type); 
+ } + return std::make_shared(unique, ParameterVector{in}); + } +}; + +TEST_P(ReferenceUniqueLayerTest_NoAxis, CompareWithHardcodedRefs) { + Exec(); +} + +template +std::vector flatten(std::initializer_list> test_cases) { + using std::begin; + using std::end; + + std::vector flattened; + for (auto&& tc : test_cases) { + flattened.insert(flattened.end(), std::make_move_iterator(begin(tc)), std::make_move_iterator(end(tc))); + } + return flattened; +} + +template +std::vector params_unique_int() { + static_assert(std::numeric_limits::is_integer, "Integer type expected"); + std::vector scalar_and_1D{UniqueParams{Shape{}, + std::vector{1}, + std::vector{1}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + false}, + UniqueParams{Shape{}, + std::vector{1}, + std::vector{1}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + true}, + UniqueParams{Shape{1}, + std::vector{2}, + std::vector{2}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + false}, + UniqueParams{Shape{1}, + std::vector{2}, + std::vector{2}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + true}, + UniqueParams{Shape{5}, + std::vector{5, 4, 3, 2, 1}, + std::vector{5, 4, 3, 2, 1}, + std::vector{0, 1, 2, 3, 4}, + std::vector{0, 1, 2, 3, 4}, + std::vector{1, 1, 1, 1, 1}, + nullptr, + false, + "1D no duplicates"}, + UniqueParams{Shape{5}, + std::vector{5, 4, 3, 2, 1}, + std::vector{1, 2, 3, 4, 5}, + std::vector{4, 3, 2, 1, 0}, + std::vector{4, 3, 2, 1, 0}, + std::vector{1, 1, 1, 1, 1}, + nullptr, + true, + "1D no duplicates"}, + UniqueParams{Shape{7}, + std::vector{1, 3, 5, 3, 2, 4, 2}, + std::vector{1, 3, 5, 2, 4}, + std::vector{0, 1, 2, 4, 5}, + std::vector{0, 1, 2, 1, 3, 4, 3}, + std::vector{1, 2, 1, 2, 1}, + nullptr, + false, + "1D with duplicates"}, + UniqueParams{Shape{7}, + std::vector{1, 3, 5, 3, 2, 4, 2}, + std::vector{1, 2, 3, 4, 5}, + std::vector{0, 4, 1, 5, 2}, + std::vector{0, 2, 4, 2, 1, 3, 1}, + std::vector{1, 2, 2, 1, 1}, + 
nullptr, + true, + "1D with duplicates"}, + UniqueParams{Shape{7}, + std::vector{1, 3, 5, 3, 2, 4, 2}, + std::vector{1, 2, 3, 4, 5}, + std::vector{0, 4, 1, 5, 2}, + std::vector{0, 2, 4, 2, 1, 3, 1}, + std::vector{1, 2, 2, 1, 1}, + make_axis(0), + true, + "1D with duplicates and axis"}}; + + std::vector N_C_layout{UniqueParams{Shape{2, 6}, + std::vector{3, 5, 3, 2, 4, 2, 1, 2, 3, 4, 5, 6}, + std::vector{3, 5, 2, 4, 1, 6}, + std::vector{0, 1, 3, 4, 6, 11}, + std::vector{0, 1, 0, 2, 3, 2, 4, 2, 0, 3, 1, 5}, + std::vector{3, 2, 3, 2, 1, 1}, + nullptr, + false, + "2D no axis"}, + UniqueParams{Shape{2, 4}, + std::vector{1, 2, 3, 4, 1, 2, 3, 5}, + std::vector{1, 2, 3, 4, 1, 2, 3, 5}, + std::vector{0, 1}, + std::vector{0, 1}, + std::vector{1, 1}, + make_axis(0), + false, + "2D no duplicates"}, + UniqueParams{Shape{2, 4}, + std::vector{1, 2, 3, 4, 1, 2, 3, 5}, + std::vector{1, 2, 3, 4, 1, 2, 3, 5}, + std::vector{0, 1, 2, 3}, + std::vector{0, 1, 2, 3}, + std::vector{1, 1, 1, 1}, + make_axis(1), + false, + "2D no duplicates"}, + UniqueParams{Shape{2, 4}, + std::vector{1, 2, 2, 4, 1, 2, 2, 5}, + std::vector{1, 2, 4, 1, 2, 5}, + std::vector{0, 1, 3}, + std::vector{0, 1, 1, 2}, + std::vector{1, 2, 1}, + make_axis(1), + false, + "2D with duplicates"}}; + + std::vector N_D_layout{UniqueParams{Shape{2, 2, 3}, + // 2 identical 2D slices over axis 0 + std::vector{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6}, + std::vector{1, 2, 3, 4, 5, 6}, + std::vector{0}, + std::vector{0, 0}, + std::vector{2}, + make_axis(0), + false, + "3D with duplicates"}, + UniqueParams{Shape{2, 2, 3}, + // 2 identical 2D slices over axis 1 + std::vector{6, 5, 4, 6, 5, 4, 3, 2, 1, 3, 2, 1}, + std::vector{6, 5, 4, 3, 2, 1}, + std::vector{0}, + std::vector{0, 0}, + std::vector{2}, + make_axis(1), + false, + "3D with duplicates"}, + UniqueParams{Shape{2, 2, 3}, + // the first and the last slice over axis 2 are equal + std::vector{-1, 2, -1, 5, -3, 5, 7, -8, 7, 4, 4, 4}, + std::vector{-1, 2, 5, -3, 7, -8, 4, 4}, + 
std::vector{0, 1}, + std::vector{0, 1, 0}, + std::vector{2, 1}, + make_axis(2), + false, + "3D with duplicates(1 & 3)"}, + UniqueParams{Shape{2, 2, 3}, + // the first and the second slice over axis 2 are equal + std::vector{-1, -1, 2, 5, 5, -3, 7, 7, -8, 4, 4, 4}, + std::vector{-1, 2, 5, -3, 7, -8, 4, 4}, + std::vector{0, 2}, + std::vector{0, 0, 1}, + std::vector{2, 1}, + make_axis(2), + false, + "3D with duplicates (1 & 2)"}}; + + return flatten({std::move(scalar_and_1D), std::move(N_C_layout), std::move(N_D_layout)}); +} + +template +std::vector params_unique_float() { + static_assert(!std::numeric_limits::is_integer, "Floating point type expected"); + // just some fancy numbers to be used in the input tensors + const auto sq2 = Data_t{1.4142135}; + const auto sq3 = Data_t{1.7320508075}; + const auto e = Data_t{2.71828}; + const auto pi = Data_t{3.141592}; + + const std::vector params{UniqueParams{Shape{}, + std::vector{pi}, + std::vector{pi}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + false}, + UniqueParams{Shape{}, + std::vector{pi}, + std::vector{pi}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + true}, + UniqueParams{Shape{1}, + std::vector{-e}, + std::vector{-e}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + false}, + UniqueParams{Shape{1}, + std::vector{-e}, + std::vector{-e}, + std::vector{0}, + std::vector{0}, + std::vector{1}, + nullptr, + true}, + UniqueParams{Shape{6}, + std::vector{pi, -pi, -e, e, sq3, sq2}, + std::vector{pi, -pi, -e, e, sq3, sq2}, + std::vector{0, 1, 2, 3, 4, 5}, + std::vector{0, 1, 2, 3, 4, 5}, + std::vector{1, 1, 1, 1, 1, 1}, + nullptr, + false, + "1D no duplicates"}, + UniqueParams{Shape{6}, + std::vector{pi, -pi, -e, e, sq3, sq2}, + std::vector{-pi, -e, sq2, sq3, e, pi}, + std::vector{1, 2, 5, 4, 3, 0}, + std::vector{5, 0, 1, 4, 3, 2}, + std::vector{1, 1, 1, 1, 1, 1}, + nullptr, + true, + "1D no duplicates"}}; + + return params; +} + 
+INSTANTIATE_TEST_SUITE_P(smoke_ReferenceUniqueLayerTest_NoAxis, + ReferenceUniqueLayerTest_NoAxis, + ::testing::ValuesIn(flatten({params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_float(), + params_unique_int(), + params_unique_int(), + params_unique_int(), + params_unique_int(), + params_unique_int(), + params_unique_int(), + params_unique_int(), + params_unique_int()})), + + ReferenceUniqueLayerTest_NoAxis::getTestCaseName); + +} // namespace diff --git a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp b/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp index b88c11f3709..85ae95fec59 100644 --- a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp +++ b/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp @@ -109,7 +109,7 @@ void ReferenceCNNTest::Validate() { outputs_legacy.emplace_back(element::f32, result->get_shape(), outData); } for (size_t i = 0; i < outputs_legacy.size(); i++) { - CommonReferenceTest::ValidateBlobs(outputs_legacy[i], outputs_ov20[i], threshold, abs_threshold); + CommonReferenceTest::ValidateBlobs(outputs_legacy[i], outputs_ov20[i], i, threshold, abs_threshold); } }