diff --git a/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp b/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
index cc4596f493f..567ddda804d 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
@@ -290,8 +290,17 @@ TEST(algebraic_simplification, replace_transpose_with_reshape) {
         auto param = make_shared<op::Parameter>(element::f32, shape);
         shared_ptr<Node> A1;
         if (multiout) {
+            shared_ptr<Node> k;
             auto last_dim = shape.rank().get_length() - 1;
-            A1 = make_shared<op::v0::TopK>(param, last_dim, element::i32);
+            if (shape[last_dim].is_dynamic()) {
+                k = make_shared<op::v1::Gather>(make_shared<op::v0::ShapeOf>(param),
+                                                op::Constant::create(element::i64, {}, {last_dim}),
+                                                op::Constant::create(element::i64, {}, {0}));
+            } else {
+                k = make_shared<op::Constant>(element::i64, Shape{}, std::vector<int64_t>{shape[last_dim].get_length()});
+            }
+            A1 = make_shared<op::v1::TopK>(param, k, last_dim,
+                                           op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE);
         } else {
             A1 = make_shared(param);
         }
@@ -383,7 +392,7 @@ TEST(algebraic_simplification, gather_3d_indices_constant_axis_1) {
         shared_ptr<Node> A1;
         if (multiout) {
             auto last_dim = pshape.rank().get_length() - 1;
-            A1 = make_shared<op::v0::TopK>(A, last_dim, element::i32);
+            A1 = make_shared<op::v1::TopK>(A, op::Constant::create(element::i64, {}, {1}), last_dim, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE);
         } else {
             A1 = make_shared(A);
         }
diff --git a/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp b/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp
index 2033d6feaaa..d6e7d1533d9 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp
@@ -258,8 +258,16 @@ TEST(nop_elimination, squeeze_unsqueeze_overlap_elimination) {
         auto A = make_shared<op::Parameter>(element::f32, shape);
         shared_ptr<Node> A1;
         if (multiout) {
+            shared_ptr<Node> k;
             auto last_dim = shape.rank().get_length() - 1;
-            A1 = make_shared<op::v0::TopK>(A, last_dim, element::i32);
+            if (shape[last_dim].is_dynamic()) {
+                k = make_shared<op::v1::Gather>(make_shared<op::v0::ShapeOf>(A),
+                                                op::Constant::create(element::i64, {}, {last_dim}),
+                                                op::Constant::create(element::i64, {}, {0}));
+            } else {
+                k = make_shared<op::Constant>(element::i64, Shape{}, std::vector<int64_t>{shape[last_dim].get_length()});
+            }
+            A1 = make_shared<op::v1::TopK>(A, k, last_dim, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE);
         } else {
             A1 = make_shared(A);
         }
@@ -730,7 +738,7 @@ TEST(nop_elimination, topk_convert_elimination) {
     auto check_usecase = []() {
         auto A = make_shared<op::Parameter>(element::f32, Shape{20, 3, 4});
         auto A1 = make_shared(A);
-        auto B = make_shared<op::v0::TopK>(A1, 0, element::i64, 10);
+        auto B = make_shared<op::v1::TopK>(A1, op::Constant::create(element::i64, {}, {10}), 0, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE);
         auto C = make_shared<op::Convert>(B->output(0), B->output(0).get_element_type());
         auto baseline_f = make_shared<Function>(make_shared(C), ParameterVector{A});
         auto optimized_f = clone_function(*baseline_f);
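Both transformation-test fixtures above now build the `k` input for `op::v1::TopK` explicitly, because v1 has no "k = 0 means the whole axis" constructor default the way v0 did. A minimal sketch of the pattern, where the helper name and the exact op versions (`op::v1::Gather`, `op::v0::ShapeOf`) are my own assumptions rather than code from this PR:

    // Hypothetical helper mirroring the test change above: produce the scalar
    // i64 'k' for a given axis, whether its extent is static or dynamic.
    std::shared_ptr<ngraph::Node> k_for_axis(const std::shared_ptr<ngraph::Node>& data,
                                             const ngraph::PartialShape& shape,
                                             int64_t axis)
    {
        using namespace ngraph;
        if (shape[axis].is_static())
        {
            // Static extent: bake the axis length into a scalar Constant.
            return op::Constant::create(element::i64, Shape{}, {shape[axis].get_length()});
        }
        // Dynamic extent: slice the length out of the runtime shape.
        return std::make_shared<op::v1::Gather>(
            std::make_shared<op::v0::ShapeOf>(data),
            op::Constant::create(element::i64, Shape{}, {axis}), // which dimension
            op::Constant::create(element::i64, Shape{}, {0}));   // gather along axis 0
    }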
diff --git a/ngraph/core/include/ngraph/op/op_version_tbl.hpp b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
index 1aee4ef5e0a..4773030a5ef 100644
--- a/ngraph/core/include/ngraph/op/op_version_tbl.hpp
+++ b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
@@ -176,7 +176,6 @@ NGRAPH_OP(Tan, ngraph::op::v0, 0)
 NGRAPH_OP(Tanh, ngraph::op::v0, 0)
 NGRAPH_OP(TensorIterator, ngraph::op::v0, 0)
 NGRAPH_OP(Tile, ngraph::op::v0, 0)
-NGRAPH_OP(TopK, ngraph::op::v0, 0)
 NGRAPH_OP(TopK, ngraph::op::v1, 1)
 NGRAPH_OP(Transpose, ngraph::op::v1, 1)
 NGRAPH_OP(Unsqueeze, ngraph::op::v0, 0)
diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp
index e89cf166349..8a6b13da13d 100644
--- a/ngraph/core/include/ngraph/op/topk.hpp
+++ b/ngraph/core/include/ngraph/op/topk.hpp
@@ -26,99 +26,6 @@ namespace ngraph
 {
     namespace op
     {
-        namespace v0
-        {
-            // \brief Computes indices of top k maximum/minimum index along a specified axis for a
-            // given tensor
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. "
-                "Use v1::TopK instead of it.") NGRAPH_API TopK : public Op
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                using SortType = TopKSortType;
-
-                static constexpr NodeTypeInfo type_info{"TopK", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a TopK operation
-                TopK() = default;
-                /// \brief Constructs a TopK operation.
-                ///
-                /// \param arg The input tensor
-                /// \param top_k_axis The axis along which to compute top k indices
-                /// \param index_element_type produce indices. Currently, only int64 or int32 are
-                ///        supported
-                /// \param k Number of top indices to compute. Compute all indices if k = 0
-                /// \param compute_max Compute top k max or top k min?
-                /// \param sort SortType for sorting results, default - SORT_VALUES
-                TopK(const Output<Node>& arg,
-                     size_t top_k_axis,
-                     const element::Type& index_element_type,
-                     size_t k = 0,
-                     bool compute_max = true,
-                     SortType sort = SortType::SORT_VALUES);
-                /// \brief Constructs a TopK operation.
-                ///
-                /// \param arg The input tensor
-                /// \param k Number of top indices to compute. Compute all indices if k = 0
-                /// \param top_k_axis The axis along which to compute top k indices
-                /// \param index_element_type produce indices. Currently, only int64 or int32 are
-                ///        supported
-                /// \param compute_max Compute top k max or top k min?
-                /// \param sort SortType for sorting results, default - SORT_VALUES
-                TopK(const Output<Node>& arg,
-                     const Output<Node>& k,
-                     size_t top_k_axis,
-                     const element::Type& index_element_type,
-                     bool compute_max = true,
-                     SortType sort = SortType::SORT_VALUES);
-
-                /// \brief Constructs a TopK operation.
-                ///
-                /// \param arg The input tensor
-                /// \param k Number of top indices to compute. Compute all indices if k = 0
-                /// \param top_k_axis The axis along which to compute top k indices
-                /// \param index_element_type produce indices. Currently, only int64 or int32 are
-                ///        supported
-                /// \param compute_max Compute top k max or top k min?
-                /// \param sort SortType for sorting results, default - NONE
-                TopK(const Output<Node>& arg,
-                     const Output<Node>& k,
-                     const Output<Node>& top_k_axis,
-                     const element::Type& index_element_type,
-                     bool compute_max = true,
-                     SortType sort = SortType::NONE);
-
-                void validate_and_infer_types() override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                size_t get_k() const;
-                void set_k(size_t k);
-
-                size_t get_top_k_axis() const;
-                Dimension get_top_k_axis_dynamic() const;
-                void set_top_k_axis(size_t k);
-
-                element::Type get_index_element_type() const { return m_index_element_type; }
-                bool get_compute_max() const { return m_compute_max; }
-                SortType get_sort() const { return m_sort; }
-                size_t get_default_output_index() const override { return no_default_index(); }
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-
-            protected:
-                element::Type m_index_element_type;
-                bool m_compute_max{false};
-                SortType m_sort{SortType::NONE};
-                Shape compute_output_shape(const Shape input_shape,
-                                           const int64_t k,
-                                           const size_t axis) const;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        } // namespace v0
-
         namespace v1
         {
             /// \brief Computes indices and values of the k maximum/minimum values
@@ -261,9 +168,5 @@ namespace ngraph
                                                 const element::Type& k_element_type) const override;
         };
     } // namespace v3
-
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    using v0::TopK;
-    NGRAPH_SUPPRESS_DEPRECATED_END
-    } // op
+    } // op
 } // ngraph
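With the v0 class gone from the header, the call-site translation used throughout the tests below maps roughly as follows. This is a hedged summary, not code from the PR; note the bool-to-enum change and the reversed output order:

    using namespace ngraph;
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{128, 1000});
    // v0 (removed): make_shared<op::v0::TopK>(data, /*top_k_axis=*/1, element::i32,
    //                                         /*k=*/5, /*compute_max=*/true);
    // and indices were output(0), values output(1).
    auto k = op::Constant::create(element::i64, Shape{}, {5});
    auto topk = std::make_shared<op::v1::TopK>(data, k, /*axis=*/1,
                                               op::v1::TopK::Mode::MAX,
                                               op::v1::TopK::SortType::SORT_VALUES);
    auto values = topk->output(0);  // same element type as 'data'
    auto indices = topk->output(1); // i32 unless overridden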
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/topk.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/topk.hpp
index 575a59e2895..4faac42c949 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/topk.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/topk.hpp
@@ -80,7 +80,7 @@ namespace ngraph
                       size_t axis,
                       size_t k,
                       bool compute_max,
-                      op::TopK::SortType sort = op::TopK::SortType::NONE)
+                      op::v1::TopK::SortType sort = op::v1::TopK::SortType::NONE)
             {
                 using namespace std;
                 // reorder source axis visit order and make "axis" inner most
@@ -137,13 +137,13 @@ namespace ngraph
                 {
                     switch (sort)
                     {
-                    case op::TopK::SortType::NONE: break;
-                    case op::TopK::SortType::SORT_INDICES:
+                    case op::v1::TopK::SortType::NONE: break;
+                    case op::v1::TopK::SortType::SORT_INDICES:
                         std::sort(workspace.begin(),
                                   workspace.begin() + k,
                                   sort_indices_descending);
                         break;
-                    case op::TopK::SortType::SORT_VALUES:
+                    case op::v1::TopK::SortType::SORT_VALUES:
                         std::sort(workspace.begin(), workspace.begin() + k, compare_max);
                         break;
                     }
@@ -152,13 +152,13 @@ namespace ngraph
                 {
                     switch (sort)
                     {
-                    case op::TopK::SortType::NONE: break;
-                    case op::TopK::SortType::SORT_INDICES:
+                    case op::v1::TopK::SortType::NONE: break;
+                    case op::v1::TopK::SortType::SORT_INDICES:
                         std::sort(workspace.begin(),
                                   workspace.begin() + k,
                                   sort_indices_ascending);
                         break;
-                    case op::TopK::SortType::SORT_VALUES:
+                    case op::v1::TopK::SortType::SORT_VALUES:
                         std::sort(workspace.begin(), workspace.begin() + k, compare_min);
                         break;
                     }
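The reference kernel keeps the best k entries in a workspace of (index, value) pairs, and the retyped `sort` argument only decides their final ordering. A condensed sketch of that rule, my own distillation of the switch statements above rather than the library's code:

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    using Entry = std::pair<std::size_t, float>; // (source index, value)

    // SORT_VALUES for a max-query orders by value descending (compare_max);
    // for a min-query, ascending (compare_min). SORT_INDICES orders the same
    // k winners by their original index instead; NONE leaves them unordered.
    void order_top_k(std::vector<Entry>& workspace, std::size_t k, bool compute_max)
    {
        std::sort(workspace.begin(), workspace.begin() + k,
                  [compute_max](const Entry& a, const Entry& b) {
                      return compute_max ? a.second > b.second : a.second < b.second;
                  });
    }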
diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp
index 830f36635b2..17155dc14d2 100644
--- a/ngraph/core/src/op/topk.cpp
+++ b/ngraph/core/src/op/topk.cpp
@@ -28,204 +28,9 @@
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/topk.hpp"
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-
 using namespace std;
 using namespace ngraph;
 
-constexpr NodeTypeInfo op::v0::TopK::type_info;
-
-op::v0::TopK::TopK(const Output<Node>& arg,
-                   size_t top_k_axis,
-                   const element::Type& index_element_type,
-                   size_t k,
-                   bool compute_max,
-                   SortType sort)
-    : Op({arg})
-    , m_index_element_type(index_element_type)
-    , m_compute_max(compute_max)
-    , m_sort(sort)
-{
-    set_argument(1, op::Constant::create(element::i64, Shape{1}, {k})->output(0));
-    set_argument(2, op::Constant::create(element::i64, Shape{1}, {top_k_axis})->output(0));
-    add_provenance_group_member(input_value(1).get_node_shared_ptr());
-    add_provenance_group_member(input_value(2).get_node_shared_ptr());
-    constructor_validate_and_infer_types();
-}
-
-op::v0::TopK::TopK(const Output<Node>& arg,
-                   const Output<Node>& k,
-                   size_t top_k_axis,
-                   const element::Type& index_element_type,
-                   bool compute_max,
-                   SortType sort)
-    : Op({arg, k})
-    , m_index_element_type(index_element_type)
-    , m_compute_max(compute_max)
-    , m_sort(sort)
-{
-    set_argument(2, op::Constant::create(element::i64, Shape{1}, {top_k_axis})->output(0));
-    add_provenance_group_member(input_value(2).get_node_shared_ptr());
-    constructor_validate_and_infer_types();
-}
-
-op::v0::TopK::TopK(const Output<Node>& arg,
-                   const Output<Node>& k,
-                   const Output<Node>& top_k_axis,
-                   const element::Type& index_element_type,
-                   bool compute_max,
-                   SortType sort)
-    : Op({arg, k, top_k_axis})
-    , m_index_element_type(index_element_type)
-    , m_compute_max(compute_max)
-    , m_sort(sort)
-{
-    constructor_validate_and_infer_types();
-}
-
-size_t op::v0::TopK::get_k() const
-{
-    size_t k = 0;
-    if (auto const_op = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr()))
-    {
-        k = const_op->cast_vector<int64_t>()[0];
-    }
-    Dimension top_k_axis = get_top_k_axis_dynamic();
-    if (k == 0 && get_input_partial_shape(0).is_static() && top_k_axis.is_static())
-    {
-        k = get_input_partial_shape(0).to_shape()[top_k_axis.get_length()];
-    }
-    return k;
-}
-
-void op::v0::TopK::set_k(size_t k)
-{
-    shared_ptr<Node> current_const =
-        get_input_size() == 1 ? nullptr : input_value(1).get_node_shared_ptr();
-    auto replacement_const = op::Constant::create(element::i64, Shape{1}, {k})->output(0);
-    this->input(1).replace_source_output(replacement_const);
-    replace_provenance_group_member(current_const, replacement_const.get_node_shared_ptr());
-}
-
-size_t op::v0::TopK::get_top_k_axis() const
-{
-    auto d = get_top_k_axis_dynamic();
-    NGRAPH_CHECK(d.is_static(),
-                 "get_top_k_axis called on a TopK node whose 'top_k_axis' input is not constant");
-    return d.get_length();
-}
-
-Dimension op::v0::TopK::get_top_k_axis_dynamic() const
-{
-    auto const_op = dynamic_pointer_cast<op::Constant>(input_value(2).get_node_shared_ptr());
-    if (const_op)
-    {
-        return const_op->cast_vector<int64_t>()[0];
-    }
-    else
-    {
-        return Dimension::dynamic();
-    }
-}
-
-void op::v0::TopK::set_top_k_axis(size_t top_k_axis)
-{
-    shared_ptr<Node> current_const = input_value(2).get_node_shared_ptr();
-    auto replacement_const = op::Constant::create(element::i64, Shape{1}, {top_k_axis})->output(0);
-    this->input(2).replace_source_output(replacement_const);
-    replace_provenance_group_member(current_const, replacement_const.get_node_shared_ptr());
-}
-
-void op::v0::TopK::validate_and_infer_types()
-{
-    const PartialShape& input_shape = get_input_partial_shape(0);
-    Rank input_rank = input_shape.rank();
-    element::Type input_element_type = get_input_element_type(0);
-
-    NODE_VALIDATION_CHECK(
-        this, !m_index_element_type.is_dynamic(), "Argument element type must not be dynamic.");
-
-    NODE_VALIDATION_CHECK(this,
-                          m_index_element_type == element::i32 ||
-                              m_index_element_type == element::i64,
-                          "Argument element type must be i64 or i32 (got ",
-                          m_index_element_type,
-                          ").");
-
-    NODE_VALIDATION_CHECK(this,
-                          input_rank.is_dynamic() || input_rank.get_length() > 0,
-                          "Argument rank must be greater than 0.");
-
-    NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(1).compatible(element::i64),
-                          "Element type for 'k' must be i64");
-    NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(2).compatible(element::i64),
-                          "Element type for 'top_k_axis' must be i64");
-
-    Dimension top_k_axis = get_top_k_axis_dynamic();
-    NODE_VALIDATION_CHECK(this,
-                          input_rank.is_dynamic() || top_k_axis.is_dynamic() ||
-                              top_k_axis.get_length() < input_rank.get_length(),
-                          "TopK axis (",
-                          top_k_axis,
-                          ") is out of bounds.");
-
-    size_t k = get_k();
-    NODE_VALIDATION_CHECK(this,
-                          input_rank.is_dynamic() || top_k_axis.is_dynamic() ||
-                              input_shape[top_k_axis.get_length()].is_dynamic() ||
-                              static_cast<int64_t>(k) <=
-                                  input_shape[top_k_axis.get_length()].get_length(),
-                          "K (",
-                          k,
-                          ") exceeds the dimension (",
-                          input_shape[top_k_axis.get_length()],
-                          ") of the TopK axis (axis ",
-                          top_k_axis,
-                          ").");
-
-    PartialShape output_shape{input_shape};
-
-    if (input_rank.is_static())
-    {
-        if (top_k_axis.is_static())
-        {
-            if (k != 0)
-            {
-                output_shape[top_k_axis.get_length()] = k;
-            }
-            else if (k == 0 && output_shape[top_k_axis.get_length()].is_static())
-            {
-                output_shape[top_k_axis.get_length()] = input_shape[top_k_axis.get_length()];
-            }
-        }
-        else
-        {
-            // If top_k_axis is not static and k is not 0, then we could be changing any
-            // dimension. So we have to change all dimensions to dynamic.
-            output_shape = PartialShape::dynamic(input_rank);
-        }
-    }
-
-    set_input_is_relevant_to_shape(2);
-
-    set_output_size(2);
-    set_output_type(0, m_index_element_type, output_shape);
-    set_output_type(1, input_element_type, output_shape);
-}
-
-shared_ptr<Node> op::v0::TopK::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<TopK>(new_args.at(0),
-                             new_args.at(1),
-                             new_args.at(2),
-                             m_index_element_type,
-                             m_compute_max,
-                             m_sort);
-}
-
 namespace topk
 {
     template <element::Type_t INPUT_ET, element::Type_t INDEX_ET>
@@ -236,7 +41,7 @@ namespace topk
     bool evaluate(const HostTensorPtr& arg,
                   const HostTensorPtr& out_indices,
                   const HostTensorPtr& out_values,
                   const Shape out_shape,
                   const size_t axis,
                   const size_t k,
                   const bool compute_max,
-                  const op::TopK::SortType sort)
+                  const op::v1::TopK::SortType sort)
     {
         using T = typename element_type_traits<INPUT_ET>::value_type;
         using U = typename element_type_traits<INDEX_ET>::value_type;
@@ -267,7 +72,7 @@ namespace topk
                   const size_t axis,
                   const size_t k,
                   const bool max,
-                  const op::TopK::SortType sort,
+                  const op::v1::TopK::SortType sort,
                   const element::Type index_et)
     {
         bool rc = true;
@@ -293,7 +98,7 @@ namespace topk
                   const size_t axis,
                   const size_t k,
                   const bool max,
-                  const op::TopK::SortType sort,
+                  const op::v1::TopK::SortType sort,
                   const element::Type index_et)
     {
         bool rc = true;
@@ -356,77 +161,6 @@ namespace topk
         }
         return k;
     }
-
-    // used only in v0, where the type is set as int64_t
-    size_t read_top_k_axis_from_host_tensor(const HostTensorPtr& arg)
-    {
-        NGRAPH_CHECK(arg->get_element_type() == element::i64,
-                     "TopK axis element type should be i64");
-        auto p = arg->get_data_ptr<int64_t>();
-        size_t axis = static_cast<size_t>(p[0]);
-        return axis;
-    }
 }
 
-Shape op::v0::TopK::compute_output_shape(const Shape input_shape,
-                                         const int64_t k,
-                                         const size_t axis) const
-{
-    Shape output_shape{input_shape};
-    if (k != 0)
-    {
-        output_shape[axis] = k;
-    }
-    return output_shape;
-}
-
-bool op::v0::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
-{
-    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::TopK::evaluate");
-
-    // check data types for arg, k and output element type
-    Shape arg_shape = inputs[0]->get_shape();
-
-    // 1. get axis, mode (max/min), sort_type
-    size_t axis = 0;
-    Dimension axis_dim = get_top_k_axis_dynamic();
-    if (axis_dim.is_static())
-    {
-        axis = axis_dim.get_length();
-    }
-    else
-    {
-        axis = topk::read_top_k_axis_from_host_tensor(inputs[2]);
-        NGRAPH_CHECK(axis <= arg_shape.size(), "TopK axis is out of bounds");
-    }
-    bool compute_max = get_compute_max();
-    SortType sort_type = get_sort();
-
-    // 2. get value of k - from constant node or from HT
-    size_t k = get_k();
-    if (k == 0)
-    {
-        k = topk::read_k_from_host_tensor(inputs[1]);
-        if (k == 0)
-        {
-            // the kernel can't handle k = 0, but output_shape[axis] = arg_shape[axis]
-            k = arg_shape[axis];
-        }
-    }
-    NGRAPH_CHECK(k <= arg_shape.at(axis), "K exceeds the dimension of the TopK axis");
-
-    // 3. Compute output_shape
-    auto output_shape = compute_output_shape(inputs[0]->get_shape(), k, axis);
-
-    return topk::evaluate_topk(inputs[0],
-                               outputs[0],
-                               outputs[1],
-                               output_shape,
-                               axis,
-                               k,
-                               compute_max,
-                               sort_type,
-                               get_index_element_type());
-}
 
 // v1 version starts
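The deleted v0::TopK::evaluate above is where the k = 0 convention lived for a constant k: it silently rewrote k to the axis length before calling the kernel. The rewritten tests below therefore bake the axis length in up front (topk_1d_max_all, for example, now passes k = 6 where it used to pass k = 0). The equivalent caller-side logic, as an illustrative sketch only:

    #include "ngraph/shape.hpp"

    // What the old v0 evaluate did implicitly for constant k; with v1 the
    // test author resolves it before constructing the op.
    size_t resolve_k(size_t requested_k, const ngraph::Shape& arg_shape, size_t axis)
    {
        return requested_k == 0 ? arg_shape.at(axis) : requested_k;
    }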
diff --git a/ngraph/test/backend/topk.in.cpp b/ngraph/test/backend/topk.in.cpp
index b275b1d9782..6218a430722 100644
--- a/ngraph/test/backend/topk.in.cpp
+++ b/ngraph/test/backend/topk.in.cpp
@@ -33,8 +33,6 @@
 #include "util/test_control.hpp"
 #include "util/test_tools.hpp"
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-
 using namespace std;
 using namespace ngraph;
 
@@ -64,12 +62,21 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50)
     Shape rshape5{128, 5};
     Shape rshape1{128, 1};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, true);
-    auto C = make_shared<op::v0::TopK>(A, 1, element::i32, 1, true);
-    auto out5_value = B->output(1);
-    auto out5_index = B->output(0);
-    auto out1_value = C->output(1);
-    auto out1_index = C->output(0);
+    auto B = make_shared<op::v1::TopK>(A,
+                                       op::Constant::create(element::i64, {}, {5}),
+                                       1,
+                                       op::v1::TopK::Mode::MAX,
+                                       op::v1::TopK::SortType::SORT_VALUES);
+    auto C = make_shared<op::v1::TopK>(A,
+                                       op::Constant::create(element::i64, {}, {1}),
+                                       1,
+                                       op::v1::TopK::Mode::MAX,
+                                       op::v1::TopK::SortType::SORT_VALUES);
+
+    auto out5_value = B->output(0);
+    auto out5_index = B->output(1);
+    auto out1_value = C->output(0);
+    auto out1_index = C->output(1);
     auto f = make_shared<Function>(OutputVector{out5_value, out5_index, out1_value, out1_index},
                                    ParameterVector{A});
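The same mechanical swap repeats through the rest of this file: v0::TopK produced indices at output(0) and values at output(1), while v1::TopK emits values first, so every result0/result1 pair, its tensor element type, and the matching EXPECT_* call trade places. Stated as a usage sketch, assuming a v1 node `B` over an f32 parameter `A` as in these tests:

    // v1 output convention:
    //   B->output(0) -> selected values, same element type as the input (f32 here)
    //   B->output(1) -> their indices, i32 by default
    auto values_f  = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
    auto indices_f = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
    // Result tensors are therefore allocated as f32 for values_f and i32 for
    // indices_f; float results are checked with all_close_f, integers with EXPECT_EQ.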
@@ -133,9 +140,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, true, op::TopK::SortType::NONE);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -184,9 +194,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, false, op::TopK::SortType::NONE);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -235,9 +248,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, true, op::TopK::SortType::SORT_VALUES);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -282,9 +298,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, false, op::TopK::SortType::SORT_VALUES);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -333,9 +352,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, true, op::TopK::SortType::SORT_INDICES);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_INDICES);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -384,9 +406,12 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index)
     Shape shape{128, 1000};
     Shape rshape{128, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 5, false, op::TopK::SortType::SORT_INDICES);
-    auto out_value = B->output(1);
-    auto out_index = B->output(0);
+    auto k = op::Constant::create(element::i64, {}, {5});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_INDICES);
+    auto out_value = B->output(0);
+    auto out_index = B->output(1);
     auto f = make_shared<Function>(OutputVector{out_value, out_index}, ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -435,7 +460,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all)
     Shape shape{6};
     Shape rshape{6};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 0, true);
+    auto k = op::Constant::create(element::i64, {}, {6});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -444,16 +472,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{6, 5, 4, 3, 2, 1}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{6, 5, 4, 3, 2, 1}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all)
@@ -461,7 +489,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all)
     Shape shape{6};
     Shape rshape{6};
     auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 0, true);
+    auto k = op::Constant::create(element::i64, {}, {6});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -475,10 +506,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all)
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result0));
+    EXPECT_EQ((vector<int32_t>{6, 5, 4, 3, 2, 1}), read_vector<int32_t>(result0));
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_EQ((vector<int32_t>{6, 5, 4, 3, 2, 1}), read_vector<int32_t>(result1));
+    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial)
@@ -486,7 +517,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial)
     Shape shape{6};
     Shape rshape{3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 3, true);
+    auto k = op::Constant::create(element::i64, {}, {3});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -495,16 +529,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5, 4, 3}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{6, 5, 4}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{6, 5, 4}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5, 4, 3}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one)
@@ -512,7 +546,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one)
     Shape shape{6};
     Shape rshape{1};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 1, true);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -521,16 +558,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{6}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{6}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all)
@@ -538,7 +575,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all)
     Shape shape{6};
     Shape rshape{6};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 0, false);
+    auto k = op::Constant::create(element::i64, {}, {6});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -547,16 +587,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{6, 5, 4, 3, 2, 1});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5, 4, 3, 2, 1, 0}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial)
@@ -564,7 +604,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial)
     Shape shape{6};
     Shape rshape{3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 3, false);
+    auto k = op::Constant::create(element::i64, {}, {3});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -573,16 +616,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{6, 5, 4, 3, 2, 1});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5, 4, 3}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{1, 2, 3}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1, 2, 3}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5, 4, 3}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one)
@@ -590,7 +633,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one)
     Shape shape{6};
     Shape rshape{1};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 1, false);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -599,16 +645,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{6, 5, 4, 3, 2, 1});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{5}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{1}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{5}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all)
@@ -616,7 +662,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all)
     Shape shape{2, 3, 2};
     Shape rshape{2, 3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 0, true);
+    auto k = op::Constant::create(element::i64, {}, {3});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -625,17 +674,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_int64)
@@ -643,7 +692,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_int64)
     Shape shape{2, 3, 2};
     Shape rshape{2, 3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i64, 0, true);
+    auto k = op::Constant::create(element::i64, {}, {3});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, element::i64);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -652,17 +704,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_int64)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i64, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i64, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int64_t>{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), read_vector<int64_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 8, 2, 11, 7, 6, 3, 5, 1}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int64_t>{1, 1, 0, 2, 2, 0, 2, 2, 0, 1, 1, 0}), read_vector<int64_t>(result1));
 }
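topk_int64 above is the one test that overrides the index element type: v1::TopK takes it as a sixth constructor argument, defaulting to i32. A sketch, reusing `A` and `k` as declared in that test:

    auto B = make_shared<op::v1::TopK>(A, k, /*axis=*/1,
                                       op::v1::TopK::Mode::MAX,
                                       op::v1::TopK::SortType::SORT_VALUES,
                                       element::i64);
    // B->output(1) now yields i64 indices, hence read_vector<int64_t> above.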
NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial)
@@ -670,7 +722,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial)
     Shape shape{2, 6, 3, 2, 4};
     Shape rshape{2, 2, 3, 2, 4};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 2, true);
+    auto k = op::Constant::create(element::i64, {}, {2});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -703,20 +758,11 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial)
                             205., 277., 213., 285., 198., 270., 206., 278., 214., 286.,
                             199., 271., 207., 279., 215., 287., 200., 272., 208., 280.,
                             216., 288.});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ(
-        (vector<int32_t>{5, 5, 5, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5,
-                         3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 1, 1, 1, 3, 3, 3, 3,
-                         5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5,
-                         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 4, 1, 1, 1, 1, 1, 1, 5, 1, 3, 3}),
-        read_vector<int32_t>(result0));
-
-    auto h1 = backend->compile(f1);
-    h1->call_with_validate({result1}, {a});
     EXPECT_TRUE(test::all_close_f(
         (vector<float>{169, 241, 177, 249, 185, 233, 170, 242, 178, 250, 186, 258,
                        171, 243, 179, 251, 187, 259, 172, 224, 180, 252, 188, 260,
                        149, 221, 157, 229,
@@ -725,8 +771,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial)
                        206, 278, 214, 286, 199, 271, 207, 279, 215, 287, 200, 272,
                        241, 280, 216, 288, 193, 265, 201, 273, 209, 281, 194, 266,
                        202, 274, 210, 262, 175, 127, 183, 255, 191, 263, 176, 248,
                        208, 256, 212, 284}),
-        read_vector<float>(result1),
+        read_vector<float>(result0),
         MIN_FLOAT_TOLERANCE_BITS));
+
+    auto h1 = backend->compile(f1);
+    h1->call_with_validate({result1}, {a});
+    EXPECT_EQ(
+        (vector<int32_t>{5, 5, 5, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5,
+                         3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 1, 1, 1, 1, 3, 3, 3, 3,
+                         5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5,
+                         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 4, 1, 1, 1, 1, 1, 1, 5, 1, 3, 3}),
+        read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial)
@@ -734,7 +788,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial)
     Shape shape{2, 3, 2};
     Shape rshape{2, 2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 2, true);
+    auto k = op::Constant::create(element::i64, {}, {2});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -743,17 +800,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 1, 0, 2, 2, 2, 0, 1}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 11, 7, 6, 3}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{10, 12, 9, 4, 11, 7, 6, 3}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 1, 0, 2, 2, 2, 0, 1}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one)
@@ -761,7 +818,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one)
     Shape shape{2, 3, 2};
     Shape rshape{2, 1, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 1, true);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -770,16 +830,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 1, 2, 2}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{10, 12, 11, 7}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{10, 12, 11, 7}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 1, 2, 2}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all)
@@ -787,7 +847,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all)
     Shape shape{2, 3, 2};
     Shape rshape{2, 3, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 0, false);
+    auto k = op::Constant::create(element::i64, {}, {3});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -796,17 +859,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{2, 0, 1, 2, 0, 1, 1, 0, 0, 1, 2, 2}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{8, 2, 10, 4, 12, 9, 5, 1, 6, 3, 11, 7}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{2, 0, 1, 2, 0, 1, 1, 0, 0, 1, 2, 2}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial)
@@ -814,7 +877,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial)
     Shape shape{2, 3, 2};
     Shape rshape{2, 2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 2, false);
+    auto k = op::Constant::create(element::i64, {}, {2});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -823,17 +889,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{2, 0, 1, 2, 1, 0, 0, 1}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{8, 2, 10, 4, 5, 1, 6, 3}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{8, 2, 10, 4, 5, 1, 6, 3}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{2, 0, 1, 2, 1, 0, 0, 1}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one)
@@ -841,7 +907,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one)
     Shape shape{2, 3, 2};
     Shape rshape{2, 1, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 1, false);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -850,16 +919,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{2, 0, 1, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{8, 2, 5, 1}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
    auto h1 = backend->compile(f1);
    h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{8, 2, 5, 1}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{2, 0, 1, 0}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all)
@@ -867,7 +936,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all)
     Shape shape{4, 3};
     Shape rshape{4, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 4, true);
+    auto k = op::Constant::create(element::i64, {}, {4});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -876,17 +948,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 3, 0, 0, 1, 3, 2, 0, 2, 3, 2, 1}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{12, 11, 10, 9, 8, 7, 6, 2, 5, 3, 1, 4}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 3, 0, 0, 1, 3, 2, 0, 2, 3, 2, 1}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial)
@@ -894,7 +966,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial)
     Shape shape{4, 3};
     Shape rshape{2, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 2, true);
+    auto k = op::Constant::create(element::i64, {}, {2});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -903,17 +978,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 3, 0, 0, 1, 3}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{12, 11, 10, 9, 8, 7}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{12, 11, 10, 9, 8, 7}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 3, 0, 0, 1, 3}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one)
@@ -921,7 +996,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one)
     Shape shape{4, 3};
     Shape rshape{1, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 1, true);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -930,16 +1008,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{1, 3, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{12, 11, 10}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{12, 11, 10}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{1, 3, 0}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values)
@@ -947,7 +1025,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values)
     Shape shape{2, 4};
     Shape rshape{2, 1};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 1, true);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -956,16 +1037,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{1, 3, 2, 4, 1, 3, 3, 2});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{3, 1}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{4, 3}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{4, 3}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{3, 1}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all)
@@ -973,7 +1054,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all)
     Shape shape{4, 3};
     Shape rshape{4, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 4, false);
+    auto k = op::Constant::create(element::i64, {}, {4});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -982,17 +1066,17 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{3, 2, 1, 2, 0, 2, 1, 1, 3, 0, 3, 0}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f((vector<float>{3, 1, 4, 6, 2, 5, 9, 8, 7, 12, 11, 10}),
+                                  read_vector<float>(result0),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{3, 1, 4, 6, 2, 5, 9, 8, 7, 12, 11, 10}),
-                                  read_vector<float>(result1),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{3, 2, 1, 2, 0, 2, 1, 1, 3, 0, 3, 0}), read_vector<int32_t>(result1));
 }
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial)
@@ -1000,7 +1084,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial)
     Shape shape{4, 3};
     Shape rshape{2, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 2, false);
+    auto k = op::Constant::create(element::i64, {}, {2});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -1009,16 +1096,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{3, 2, 1, 2, 0, 2}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{3, 1, 4, 6, 2, 5}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{3, 1, 4, 6, 2, 5}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{3, 2, 1, 2, 0, 2}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one)
@@ -1026,7 +1113,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one)
     Shape shape{4, 3};
     Shape rshape{1, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::v0::TopK>(A, 0, element::i32, 1, false);
+    auto k = op::Constant::create(element::i64, {}, {1});
+    int64_t axis = 0;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE);
     auto f0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto f1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -1035,16 +1125,16 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one)
     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
     copy_data(a, vector<float>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
-    auto result0 = backend->create_tensor(element::i32, rshape);
-    auto result1 = backend->create_tensor(element::f32, rshape);
+    auto result0 = backend->create_tensor(element::f32, rshape);
+    auto result1 = backend->create_tensor(element::i32, rshape);
 
     auto h0 = backend->compile(f0);
     h0->call_with_validate({result0}, {a});
-    EXPECT_EQ((vector<int32_t>{3, 2, 1}), read_vector<int32_t>(result0));
+    EXPECT_TRUE(test::all_close_f(
+        (vector<float>{3, 1, 4}), read_vector<float>(result0), MIN_FLOAT_TOLERANCE_BITS));
 
     auto h1 = backend->compile(f1);
     h1->call_with_validate({result1}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{3, 1, 4}), read_vector<float>(result1), MIN_FLOAT_TOLERANCE_BITS));
+    EXPECT_EQ((vector<int32_t>{3, 2, 1}), read_vector<int32_t>(result1));
 }
 
NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max)
@@ -1052,7 +1142,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max)
     Shape shape{4, 8192, 5};
     auto A = make_shared<op::Parameter>(element::f32, shape);
 
-    auto B = make_shared<op::v0::TopK>(A, 1, element::i32, 10, true);
+    auto k = op::Constant::create(element::i64, {}, {10});
+    int64_t axis = 1;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES);
 
     auto interp_f_0 = make_shared<Function>(OutputVector{B->output(0)}, ParameterVector{A});
     auto interp_f_1 = make_shared<Function>(OutputVector{B->output(1)}, ParameterVector{A});
@@ -1067,20 +1160,20 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max)
         args.push_back(tensor_val);
     }
 
-    auto interp_results_0 = execute(interp_f_0, args, "INTERPRETER");
-    auto gpu_results_0 = execute(gpu_f_0, args, "${BACKEND_NAME}");
+    auto interp_results_0 = execute(interp_f_0, args, "INTERPRETER");
+    auto gpu_results_0 = execute(gpu_f_0, args, "${BACKEND_NAME}");
     for (size_t i = 0; i < gpu_results_0.size(); i++)
     {
-        EXPECT_EQ(gpu_results_0.at(i), interp_results_0.at(i));
+        EXPECT_TRUE(test::all_close_f(
+            gpu_results_0.at(i), interp_results_0.at(i), MIN_FLOAT_TOLERANCE_BITS));
     }
 
-    auto interp_results_1 = execute(interp_f_1, args, "INTERPRETER");
-    auto gpu_results_1 = execute(gpu_f_1, args, "${BACKEND_NAME}");
+    auto interp_results_1 = execute(interp_f_1, args, "INTERPRETER");
+    auto gpu_results_1 = execute(gpu_f_1, args, "${BACKEND_NAME}");
     for (size_t i = 0; i < gpu_results_1.size(); i++)
     {
-        EXPECT_TRUE(test::all_close_f(
-            gpu_results_1.at(i), interp_results_1.at(i), MIN_FLOAT_TOLERANCE_BITS));
+        EXPECT_EQ(gpu_results_1.at(i), interp_results_1.at(i));
     }
 }
"${BACKEND_NAME}"); + auto interp_results_0 = execute(interp_f_0, args, "INTERPRETER"); + auto gpu_results_0 = execute(gpu_f_0, args, "${BACKEND_NAME}"); for (size_t i = 0; i < gpu_results_0.size(); i++) { - EXPECT_EQ(gpu_results_0.at(i), interp_results_0.at(i)); + EXPECT_TRUE(test::all_close_f( + gpu_results_0.at(i), interp_results_0.at(i), MIN_FLOAT_TOLERANCE_BITS)); } - auto interp_results_1 = execute(interp_f_1, args, "INTERPRETER"); - auto gpu_results_1 = execute(gpu_f_1, args, "${BACKEND_NAME}"); + auto interp_results_1 = execute(interp_f_1, args, "INTERPRETER"); + auto gpu_results_1 = execute(gpu_f_1, args, "${BACKEND_NAME}"); for (size_t i = 0; i < gpu_results_1.size(); i++) { - EXPECT_TRUE(test::all_close_f( - gpu_results_1.at(i), interp_results_1.at(i), MIN_FLOAT_TOLERANCE_BITS)); + EXPECT_EQ(gpu_results_1.at(i), interp_results_1.at(i)); } } @@ -1126,8 +1222,11 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; auto A = make_shared(element::f32, shape); - auto B = make_shared(A, 1, element::i32, 2, false); - auto f0 = make_shared(OutputVector{B->output(0)}, ParameterVector{A}); + auto k = op::Constant::create(element::i64, {}, {2}); + int64_t axis = 1; + auto B = make_shared( + A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); + auto f0 = make_shared(OutputVector{B->output(1)}, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp index 6632f080456..926d6e0e355 100644 --- a/ngraph/test/eval.cpp +++ b/ngraph/test/eval.cpp @@ -1666,80 +1666,38 @@ TEST(eval, topk_v3_dyn_values_k0) ASSERT_EQ(result1_val, expec1); } -TEST(eval, topk_v0_dyn) +TEST(eval, topk_v1_dyn_k0) { Shape shape{2, 3, 2}; auto A = make_shared(element::f32, shape); auto k = make_shared(element::i64, Shape{}); - auto axis = make_shared(element::i64, Shape{}); element::Type result_et{element::i32}; - bool compute_max = true; + auto B = make_shared( + A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); - auto B = make_shared( - A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES); - - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, - ParameterVector{A, k, axis}); + auto fun = + make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); auto result0 = make_shared(); auto result1 = make_shared(); ASSERT_TRUE(fun->evaluate({result0, result1}, {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2}), - make_host_tensor(Shape{}, {1})})); - EXPECT_EQ(result0->get_element_type(), element::i32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::f32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); - auto result1_val = read_vector(result1); - auto result0_val = read_vector(result0); - - vector expec1{12, 9, 10, 4, 11, 7, 6, 3}; - ASSERT_EQ(result1_val, expec1); - - vector expec0{0, 1, 1, 2, 2, 2, 0, 1}; - ASSERT_EQ(result0_val, expec0); -} - -TEST(eval, topk_v0_dyn_k0) -{ - Shape shape{2, 3, 2}; - - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::i64, Shape{}); - auto axis = make_shared(element::i64, Shape{}); - - element::Type result_et{element::i32}; - bool compute_max = true; - - auto B = make_shared( - A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES); - - auto fun = 
diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp
index 6632f080456..926d6e0e355 100644
--- a/ngraph/test/eval.cpp
+++ b/ngraph/test/eval.cpp
@@ -1666,80 +1666,38 @@ TEST(eval, topk_v3_dyn_values_k0)
     ASSERT_EQ(result1_val, expec1);
 }

-TEST(eval, topk_v0_dyn)
+TEST(eval, topk_v1_dyn_k0)
 {
     Shape shape{2, 3, 2};

     auto A = make_shared<op::Parameter>(element::f32, shape);
     auto k = make_shared<op::Parameter>(element::i64, Shape{});
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{});

     element::Type result_et{element::i32};
-    bool compute_max = true;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et);

-    auto B = make_shared<op::v0::TopK>(
-        A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES);
-
-    auto fun = make_shared<Function>(OutputVector{B->output(0), B->output(1)},
-                                     ParameterVector{A, k, axis});
+    auto fun =
+        make_shared<Function>(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k});

     auto result0 = make_shared<HostTensor>();
     auto result1 = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result0, result1},
                               {make_host_tensor<element::Type_t::f32>(
                                    Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {2}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {1})}));
-    EXPECT_EQ(result0->get_element_type(), element::i32);
-    EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2}));
-    EXPECT_EQ(result1->get_element_type(), element::f32);
-    EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2}));
-    auto result1_val = read_vector<float>(result1);
-    auto result0_val = read_vector<int32_t>(result0);
-
-    vector<float> expec1{12, 9, 10, 4, 11, 7, 6, 3};
-    ASSERT_EQ(result1_val, expec1);
-
-    vector<int32_t> expec0{0, 1, 1, 2, 2, 2, 0, 1};
-    ASSERT_EQ(result0_val, expec0);
-}
-
-TEST(eval, topk_v0_dyn_k0)
-{
-    Shape shape{2, 3, 2};
-
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto k = make_shared<op::Parameter>(element::i64, Shape{});
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{});
-
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto B = make_shared<op::v0::TopK>(
-        A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES);
-
-    auto fun = make_shared<Function>(OutputVector{B->output(0), B->output(1)},
-                                     ParameterVector{A, k, axis});
-
-    auto result0 = make_shared<HostTensor>();
-    auto result1 = make_shared<HostTensor>();
-    ASSERT_TRUE(fun->evaluate({result0, result1},
-                              {make_host_tensor<element::Type_t::f32>(
-                                   Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {0}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {1})}));
-    EXPECT_EQ(result0->get_element_type(), element::i32);
+                               make_host_tensor<element::Type_t::i64>(Shape{}, {0})}));
+    EXPECT_EQ(result0->get_element_type(), element::f32);
     EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2}));
-    EXPECT_EQ(result1->get_element_type(), element::f32);
+    EXPECT_EQ(result1->get_element_type(), element::i32);
     EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2}));
-    auto result1_val = read_vector<float>(result1);
-    auto result0_val = read_vector<int32_t>(result0);
+    auto result0_val = read_vector<float>(result0);
+    auto result1_val = read_vector<int32_t>(result1);

-    vector<float> expec1{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1};
-    ASSERT_EQ(result1_val, expec1);
-
-    vector<int32_t> expec0{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
+    vector<float> expec0{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1};
     ASSERT_EQ(result0_val, expec0);
+
+    vector<int32_t> expec1{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
+    ASSERT_EQ(result1_val, expec1);
 }

 TEST(eval, topk_v3_param_dyn_values_k0)
@@ -1798,76 +1756,70 @@ TEST(eval, topk_v3_param_dyn_values_k2)
     ASSERT_EQ(result1_val, expec1);
 }

-TEST(eval, topk_v0_param_dyn_k2)
+TEST(eval, topk_v1_param_dyn_k2)
 {
     auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
     auto k = make_shared<op::Parameter>(element::i64, Shape{});
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{});
+    auto axis = 1;

     element::Type result_et{element::i32};
-    bool compute_max = true;
+    auto B = make_shared<op::v1::TopK>(
+        A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et);

-    auto B = make_shared<op::v0::TopK>(
-        A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES);
-
-    auto fun = make_shared<Function>(OutputVector{B->output(0), B->output(1)},
-                                     ParameterVector{A, k, axis});
+    auto fun =
+        make_shared<Function>(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k});

     auto result0 = make_shared<HostTensor>();
     auto result1 = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result0, result1},
                               {make_host_tensor<element::Type_t::f32>(
                                    Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {2}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {1})}));
-    EXPECT_EQ(result0->get_element_type(), element::i32);
+                               make_host_tensor<element::Type_t::i64>(Shape{}, {2})}));
+    EXPECT_EQ(result0->get_element_type(), element::f32);
     EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2}));
-    EXPECT_EQ(result1->get_element_type(), element::f32);
+    EXPECT_EQ(result1->get_element_type(), element::i32);
     EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2}));
-    auto result1_val = read_vector<float>(result1);
-    auto result0_val = read_vector<int32_t>(result0);
+    auto result0_val = read_vector<float>(result0);
+    auto result1_val = read_vector<int32_t>(result1);

-    vector<float> expec1{12, 9, 10, 4, 11, 7, 6, 3};
-    ASSERT_EQ(result1_val, expec1);
-
-    vector<int32_t> expec0{0, 1, 1, 2, 2, 2, 0, 1};
+    vector<float> expec0{12, 9, 10, 4, 11, 7, 6, 3};
     ASSERT_EQ(result0_val, expec0);
+
+    vector<int32_t> expec1{0, 1, 1, 2, 2, 2, 0, 1};
+    ASSERT_EQ(result1_val, expec1);
 }

-TEST(eval, topk_v0_param_dyn_k0)
+TEST(eval, topk_v1_param_dyn_k0)
 {
     auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
     auto k = make_shared<op::Parameter>(element::i64, Shape{});
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{});

     element::Type result_et{element::i32};
-    bool compute_max = true;
-    auto B = make_shared<op::v0::TopK>(
-        A, k, axis, result_et, compute_max, op::v0::TopK::SortType::SORT_VALUES);
+    auto B = make_shared<op::v1::TopK>(
+        A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et);

-    auto fun = make_shared<Function>(OutputVector{B->output(0), B->output(1)},
-                                     ParameterVector{A, k, axis});
+    auto fun =
+        make_shared<Function>(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k});

     auto result0 = make_shared<HostTensor>();
     auto result1 = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result0, result1},
                               {make_host_tensor<element::Type_t::f32>(
                                    Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {0}),
-                               make_host_tensor<element::Type_t::i64>(Shape{}, {1})}));
-    EXPECT_EQ(result0->get_element_type(), element::i32);
+                               make_host_tensor<element::Type_t::i64>(Shape{}, {0})}));
+    EXPECT_EQ(result0->get_element_type(), element::f32);
     EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2}));
-    EXPECT_EQ(result1->get_element_type(), element::f32);
+    EXPECT_EQ(result1->get_element_type(), element::i32);
     EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2}));
-    auto result1_val = read_vector<float>(result1);
-    auto result0_val = read_vector<int32_t>(result0);
+    auto result0_val = read_vector<float>(result0);
+    auto result1_val = read_vector<int32_t>(result1);

-    vector<float> expec1{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1};
-    ASSERT_EQ(result1_val, expec1);
-
-    vector<int32_t> expec0{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
+    vector<float> expec0{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1};
     ASSERT_EQ(result0_val, expec0);
+
+    vector<int32_t> expec1{0, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1, 0};
+    ASSERT_EQ(result1_val, expec1);
 }

 TEST(eval, reduce_logical_and__neg_axis)
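// The renamed eval tests exercise v1::TopK::evaluate with k supplied only at run time:
// k is a graph Parameter, and k == 0 selects the whole axis (both outputs keep shape
// {2, 3, 2}), while k == 2 trims the axis. Roughly (illustrative sketch of the API as
// used above, not taken verbatim from the patch):
//
//     auto A = make_shared<op::Parameter>(element::f32, Shape{2, 3, 2});
//     auto k = make_shared<op::Parameter>(element::i64, Shape{});
//     auto topk = make_shared<op::v1::TopK>(
//         A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES,
//         element::i32);  // explicit index element type replaces v0's result_et argument
//     // evaluating with a host tensor holding k = 0 yields values/indices over the full
//     // axis; k = 2 yields PartialShape{2, 2, 2} for both outputs.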
diff --git a/ngraph/test/onnx/onnx_import_provenance.in.cpp b/ngraph/test/onnx/onnx_import_provenance.in.cpp
index 20d76b179be..222af22be8c 100644
--- a/ngraph/test/onnx/onnx_import_provenance.in.cpp
+++ b/ngraph/test/onnx/onnx_import_provenance.in.cpp
@@ -130,7 +130,6 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_provenance_tag_downgrade_pass)
     pass_manager.register_pass<ngraph::pass::Opset0Downgrade>();
     pass_manager.run_passes(function);

-    test_provenance_tags(function, " values, indices)>");
-    test_provenance_tags(function, "");
-    test_provenance_tags(function, "");
+    test_provenance_tags(function, " values, indices)>");
+    test_provenance_tags(function, "");
 }
diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp
index 942d86f9091..da0387be000 100644
--- a/ngraph/test/op_is.cpp
+++ b/ngraph/test/op_is.cpp
@@ -868,7 +868,7 @@ namespace
     void op_is_TopK()
     {
-        op::TopK node;
+        op::v1::TopK node;
         EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
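// Two side effects of dropping v0::TopK surface here: the downgrade-pass provenance test
// expects one tag fewer, since no v0 node is materialized any more, and the op_is type
// checks now instantiate a default-constructed op::v1::TopK. The is_* predicates inspect
// only the node's type, so no inputs are required, e.g. (illustrative):
//
//     op::v1::TopK node;
//     EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));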
diff --git a/ngraph/test/provenance.cpp b/ngraph/test/provenance.cpp
index b814b16ba37..e9a28d3cec6 100644
--- a/ngraph/test/provenance.cpp
+++ b/ngraph/test/provenance.cpp
@@ -451,49 +451,6 @@ TEST(provenance, fused_decomposition_tag)
     traverse_nodes(as_node_vector(decomposed_op->outputs()), tag_check, {p1});
 }

-TEST(provenance, topk_setk)
-{
-    auto p1 = make_shared<op::Parameter>(element::f32, PartialShape{20, 3, 4});
-    p1->add_provenance_tag("P1");
-    auto tk = make_shared<op::v0::TopK>(p1, 0, element::i32, 10);
-    tk->add_provenance_tag("TK");
-    auto tkc0 = tk->input_value(1).get_node_shared_ptr();
-    tkc0->add_provenance_tag("TKC0");
-    for (auto node : topological_sort(NodeVector{tk}))
-    {
-        if (node == p1)
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"P1"}));
-        }
-        else if (node == tkc0)
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"TK", "TKC0"}));
-        }
-        else
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"TK"}));
-        }
-    }
-    tk->set_k(5);
-    auto tkc1 = tk->input_value(1).get_node_shared_ptr();
-    tkc1->add_provenance_tag("TKC1");
-    for (auto node : topological_sort(NodeVector{tk}))
-    {
-        if (node == p1)
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"P1"}));
-        }
-        else if (node == tkc1)
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"TK", "TKC0", "TKC1"}));
-        }
-        else
-        {
-            EXPECT_EQ(node->get_provenance_tags(), (ProvSet{"TK"}));
-        }
-    }
-}
-
 TEST(provenance, empty_group)
 {
     auto p1 = make_shared<op::Parameter>(element::i32, PartialShape{2, 3, 4});
@@ -515,64 +472,6 @@ TEST(provenance, empty_group)
     }
 }

-TEST(provenance, opset1_upgrade_pass_topk)
-{
-    test::ProvenanceEnabler provenance_enabler;
-
-    const size_t axis = 2;
-    const size_t k = 10;
-    const auto data = make_shared<op::Parameter>(element::i32, Shape{5, 10, 15});
-
-    const auto topk_v0 = make_shared<op::v0::TopK>(data, axis, element::i32, k);
-    const auto result = make_shared<op::Result>(topk_v0->output(0));
-    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
-
-    ngraph::pass::Manager pass_manager;
-    pass_manager.register_pass<ngraph::pass::Opset1Upgrade>();
-    pass_manager.run_passes(f);
-
-    const auto pass_replacement_node = f->get_result()->get_input_node_shared_ptr(0);
-    const auto topk_v1 = as_type_ptr<op::v1::TopK>(pass_replacement_node);
-
-    const std::string tag = "";
-    auto tag_check = [&tag](std::shared_ptr<ngraph::Node> node) {
-        auto tags = node->get_provenance_tags();
-        EXPECT_TRUE(tags.find(tag) != tags.end());
-    };
-    traverse_nodes({topk_v1}, tag_check, as_node_vector(topk_v0->input_values()));
-}
-
-TEST(provenance, opset0_downgrade_pass_topk)
-{
-    test::ProvenanceEnabler provenance_enabler;
-
-    const auto data = make_shared<op::Parameter>(element::i32, Shape{5, 10, 15});
-    const int32_t k = 10;
-    const auto k_node = op::Constant::create(element::i64, Shape{}, {k});
-    const size_t axis = 2;
-    const auto mode = op::v1::TopK::Mode::MAX;
-    const auto sort = op::v1::TopK::SortType::SORT_INDICES;
-    const auto elem_type = element::i64;
-
-    const auto topk_v1 = make_shared<op::v1::TopK>(data, k_node, axis, mode, sort, elem_type);
-    const auto result = make_shared<op::Result>(topk_v1->output(0));
-    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});
-
-    ngraph::pass::Manager pass_manager;
-    pass_manager.register_pass<ngraph::pass::Opset0Downgrade>();
-    pass_manager.run_passes(f);
-
-    const auto pass_replacement_node = f->get_result()->get_input_node_shared_ptr(0);
-    const auto topk_v0 = as_type_ptr<op::v0::TopK>(pass_replacement_node);
-
-    const std::string tag = "";
-    auto tag_check = [&tag](std::shared_ptr<ngraph::Node> node) {
-        auto tags = node->get_provenance_tags();
-        EXPECT_TRUE(tags.find(tag) != tags.end());
-    };
-    traverse_nodes({topk_v0}, tag_check, as_node_vector(topk_v1->input_values()));
-}
-
 TEST(provenance, opset1_upgrade_pass_graph)
 {
     test::ProvenanceEnabler provenance_enabler;
diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp
index 6190ce6ce4d..2cec9a01f85 100644
--- a/ngraph/test/runtime/interpreter/int_executable.cpp
+++ b/ngraph/test/runtime/interpreter/int_executable.cpp
@@ -187,10 +187,6 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::Tensor>>
             type = op->get_input_element_type(1);
         }
-        else if (is_type<op::v0::TopK>(op))
-        {
-            type = op->get_output_element_type(1);
-        }
         else
         {
             type = op->get_output_element_type(0);
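// The interpreter special-cased v0::TopK when choosing the element type of its output
// buffers, because v0 put indices at output(0) and the values type had to be read from
// output(1). With v1::TopK, output(0) already carries the values, so the generic fallback
// suffices and the branch can go (illustrative restatement of the surviving logic):
//
//     type = op->get_output_element_type(0);  // values type matches the data input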
diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp
index 60cd755d69f..0d9e05c6e19 100644
--- a/ngraph/test/runtime/interpreter/int_executable.hpp
+++ b/ngraph/test/runtime/interpreter/int_executable.hpp
@@ -1215,39 +1215,6 @@ protected:
                 args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
             break;
         }
-        case OP_TYPEID::TopK:
-        {
-            const op::TopK* topk = static_cast<const op::TopK*>(&node);
-            if (node.get_output_element_type(0) == element::i64)
-            {
-                reference::topk<T, int64_t>(args[0]->get_data_ptr<const T>(),
-                                            out[0]->get_data_ptr<int64_t>(),
-                                            out[1]->get_data_ptr<T>(),
-                                            node.get_input_shape(0),
-                                            node.get_output_shape(0),
-                                            topk->get_top_k_axis(),
-                                            topk->get_k(),
-                                            topk->get_compute_max(),
-                                            topk->get_sort());
-            }
-            else if (node.get_output_element_type(0) == element::i32)
-            {
-                reference::topk<T, int32_t>(args[0]->get_data_ptr<const T>(),
-                                            out[0]->get_data_ptr<int32_t>(),
-                                            out[1]->get_data_ptr<T>(),
-                                            node.get_input_shape(0),
-                                            node.get_output_shape(0),
-                                            topk->get_top_k_axis(),
-                                            topk->get_k(),
-                                            topk->get_compute_max(),
-                                            topk->get_sort());
-            }
-            else
-            {
-                throw ngraph_error("Unexpected type");
-            }
-            break;
-        }
         case OP_TYPEID::DetectionOutput_v0:
         {
             const op::DetectionOutput* detOut = static_cast<const op::DetectionOutput*>(&node);
diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp
index ac51e1d7b66..422d698af8f 100644
--- a/ngraph/test/runtime/opset0_tbl.hpp
+++ b/ngraph/test/runtime/opset0_tbl.hpp
@@ -126,6 +126,5 @@ NGRAPH_OP(Tan, ngraph::op)
 NGRAPH_OP(Tanh, ngraph::op)
 NGRAPH_OP(TensorIterator, ngraph::op)
 NGRAPH_OP(Tile, ngraph::op::v0)
-NGRAPH_OP(TopK, ngraph::op::v0)
 NGRAPH_OP(Unsqueeze, ngraph::op)
 NGRAPH_OP(Xor, ngraph::op)
diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp
index 1920f63cd29..82e8bf3c7d5 100644
--- a/ngraph/test/runtime/pass/opset0_downgrade.cpp
+++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp
@@ -324,32 +324,6 @@ namespace opset0_downgrade
         return op_cast_binary_elementwise_node(node);
     }

-    shared_ptr<Node> op_cast(shared_ptr<op::v1::TopK> node)
-    {
-        const auto axis = node->get_axis();
-        const auto sort_type = node->get_sort_type();
-        const auto index_elem_type = node->get_index_element_type();
-
-        bool compute_max;
-        switch (node->get_mode())
-        {
-        case op::v1::TopK::Mode::MAX: compute_max = true; break;
-        case op::v1::TopK::Mode::MIN: compute_max = false; break;
-        default: break;
-        }
-
-        const auto arg_node = node->input_value(0);
-        const auto k_node = node->input_value(1);
-
-        auto replacement_node = make_shared<op::v0::TopK>(
-            arg_node, k_node, axis, index_elem_type, compute_max, sort_type);
-
-        // values output will be 0, indices 1
-        vector<int64_t> output_order{1, 0};
-        replace_node(node, replacement_node, output_order);
-        return replacement_node;
-    }
-
     using DispatchMap = map<NodeTypeInfo, std::function<bool(shared_ptr<Node> node)>>;

     template <typename T>
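// Both conversion passes compensated for the swapped output order between v0 and v1 TopK
// by remapping outputs when substituting the node, as the deleted casts show:
//
//     // v1 -> v0 downgrade: consumers of v1 output(0) (values) must read v0 output(1)
//     vector<int64_t> output_order{1, 0};
//     replace_node(node, replacement_node, output_order);
//
// With v0::TopK removed there is nothing left to convert to or from, so the TopK op_cast
// is dropped from the downgrade pass here and from the upgrade pass below.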
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp
index 72ff93ae7c1..333ab280f71 100644
--- a/ngraph/test/runtime/pass/opset1_upgrade.cpp
+++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp
@@ -289,44 +289,6 @@ namespace opset1_upgrade
         return op_cast_binary_elementwise_node(node);
     }

-    shared_ptr<Node> op_cast(shared_ptr<op::TopK> node)
-    {
-        NGRAPH_CHECK(op::is_constant(node->input_value(1).get_node()),
-                     "parameter k is expected to be a static constant");
-        NGRAPH_CHECK(op::is_constant(node->input_value(2).get_node()),
-                     "parameter top_k_axis is expected to be a static constant");
-
-        const auto k = node->get_k();
-        const auto axis = node->get_top_k_axis();
-
-        std::string sort;
-        switch (node->get_sort())
-        {
-        case op::TopK::SortType::SORT_INDICES: sort = "index"; break;
-        case op::TopK::SortType::SORT_VALUES: sort = "value"; break;
-        case op::TopK::SortType::NONE: sort = "none"; break;
-        }
-
-        std::string mode;
-        if (node->get_compute_max())
-        {
-            mode = "max";
-        }
-        else
-        {
-            mode = "min";
-        }
-
-        const auto k_constant = op::Constant::create(element::i64, Shape{}, {k});
-        auto replacement_node =
-            make_shared<op::v1::TopK>(node->input_value(0), k_constant, axis, mode, sort);
-
-        // indices output will be 0, values 1
-        vector<int64_t> output_order{1, 0};
-        replace_node(node, replacement_node, output_order);
-        return replacement_node;
-    }
-
     shared_ptr op_cast(shared_ptr node)
     {
         auto replacement_node = make_shared(
diff --git a/ngraph/test/type_prop/top_k.cpp b/ngraph/test/type_prop/top_k.cpp
index a9a9964eac1..bb04e2cafd2 100644
--- a/ngraph/test/type_prop/top_k.cpp
+++ b/ngraph/test/type_prop/top_k.cpp
@@ -18,330 +18,9 @@
 #include "ngraph/ngraph.hpp"
 #include "util/type_prop.hpp"

-NGRAPH_SUPPRESS_DEPRECATED_START
-
 using namespace std;
 using namespace ngraph;

-TEST(type_prop, topk_invalid_rank)
-{
-    auto a = make_shared<op::Parameter>(element::f32, Shape{});
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(a, 0, element::i32, 1, true);
-        FAIL() << "TopK c-tor should throw for scalar shapes";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument rank must be greater than 0");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_invalid_top_k)
-{
-    auto a = make_shared<op::Parameter>(element::f32, Shape{2, 2});
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(a, 2, element::i32, 1, true);
-        FAIL() << "TopK c-tor should throw for invalid top k axis";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "TopK axis (2) is out of bounds");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_invalid_index_type)
-{
-    auto a = make_shared<op::Parameter>(element::f32, Shape{2, 2});
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(a, 0, element::f32, 1, true);
-        FAIL() << "TopK c-tor should throw for invalid index element type";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must be i64 or i32 (got f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_invalid_k)
-{
-    auto a = make_shared<op::Parameter>(element::f32, Shape{2, 2});
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(a, 0, element::i32, 3, true);
-        FAIL() << "TopK c-tor should throw for invalid K";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "K (3) exceeds the dimension (2) of the TopK axis (axis 0)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_dynamic_ok)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{PartialShape::dynamic()};
-    size_t top_k_axis = 22;
-    size_t k = 900;
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-
-    ASSERT_TRUE(topk->get_output_element_type(0) == element::i32);
-    ASSERT_TRUE(topk->get_output_element_type(1) == element::f32);
-    ASSERT_TRUE(topk->get_output_partial_shape(0).rank().is_dynamic());
-    ASSERT_TRUE(topk->get_output_partial_shape(1).rank().is_dynamic());
-    ASSERT_TRUE(topk->get_sort() == op::v0::TopK::SortType::SORT_VALUES);
-    try
-    {
-        auto badout = Output<Node>(topk);
-        FAIL() << "No default output for topk";
-    }
-    catch (const NodeValidationFailure&)
-    {
-    }
-}
-
-TEST(type_prop, topk_rank_dynamic_result_et_dynamic)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{PartialShape::dynamic()};
-    size_t top_k_axis = 22;
-    size_t k = 900;
-    element::Type result_et{element::dynamic};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-        FAIL() << "Dynamic result element type not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must not be dynamic");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_dynamic_result_et_invalid)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{PartialShape::dynamic()};
-    size_t top_k_axis = 22;
-    size_t k = 900;
-    element::Type result_et{element::f32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-        FAIL() << "Invalid result element type not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must be i64 or i32 (got f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_known_topk_dim_dynamic_ok)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    size_t top_k_axis = 1;
-    size_t k = 999;
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-
-    ASSERT_TRUE(topk->get_output_element_type(0) == element::i32);
-    ASSERT_TRUE(topk->get_output_element_type(1) == element::f32);
-    ASSERT_TRUE(topk->get_output_partial_shape(0).same_scheme(
-        PartialShape{Dimension::dynamic(), 999, Dimension::dynamic()}));
-    ASSERT_TRUE(topk->get_output_partial_shape(1).same_scheme(
-        PartialShape{Dimension::dynamic(), 999, Dimension::dynamic()}));
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_unknown_topk_dim_dynamic_ok)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    size_t top_k_axis = 1;
-    size_t k = 0;
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-
-    ASSERT_TRUE(topk->get_output_element_type(0) == element::i32);
-    ASSERT_TRUE(topk->get_output_element_type(1) == element::f32);
-    ASSERT_TRUE(topk->get_output_partial_shape(0).same_scheme(
-        PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
-    ASSERT_TRUE(topk->get_output_partial_shape(1).same_scheme(
-        PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
-}
-
-TEST(type_prop, topk_rank_static_dynamic_axis_oob)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    size_t top_k_axis = 22;
-    size_t k = 900;
-    element::Type result_et{element::f32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-        FAIL() << "TopK axis out-of-bounds not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must be i64 or i32 (got f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_unknown_axis_oob)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    size_t top_k_axis = 22;
-    size_t k = 0;
-    element::Type result_et{element::f32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-        FAIL() << "TopK axis out-of-bounds not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must be i64 or i32 (got f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_known_too_big)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), 3, Dimension::dynamic()};
-    size_t top_k_axis = 1;
-    size_t k = 4;
-    element::Type result_et{element::f32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    try
-    {
-        auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-        FAIL() << "Oversize K not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), "Argument element type must be i64 or i32 (got f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_unknown_ok)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), 3, Dimension::dynamic()};
-    size_t top_k_axis = 1;
-    size_t k = 0;
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-
-    ASSERT_TRUE(topk->get_output_element_type(0) == element::i32);
-    ASSERT_TRUE(topk->get_output_element_type(1) == element::f32);
-    ASSERT_TRUE(topk->get_output_partial_shape(0).same_scheme(
-        PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}));
-    ASSERT_TRUE(topk->get_output_partial_shape(1).same_scheme(
-        PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}));
-}
-
-TEST(type_prop, topk_rank_static_dynamic_k_known_ok)
-{
-    element::Type arg_et{element::f32};
-    PartialShape arg_shape{Dimension::dynamic(), 3, Dimension::dynamic()};
-    size_t top_k_axis = 1;
-    size_t k = 2;
-    element::Type result_et{element::i32};
-    bool compute_max = true;
-
-    auto param = make_shared<op::Parameter>(arg_et, arg_shape);
-
-    auto topk = make_shared<op::v0::TopK>(param, top_k_axis, result_et, k, compute_max);
-
-    ASSERT_TRUE(topk->get_output_element_type(0) == element::i32);
-    ASSERT_TRUE(topk->get_output_element_type(1) == element::f32);
-    ASSERT_TRUE(topk->get_output_partial_shape(0).same_scheme(
-        PartialShape{Dimension::dynamic(), 2, Dimension::dynamic()}));
-    ASSERT_TRUE(topk->get_output_partial_shape(1).same_scheme(
-        PartialShape{Dimension::dynamic(), 2, Dimension::dynamic()}));
-}
-
 // Since v3::TopK is backward compatible with v1::TopK all of these tests should pass
 template <typename T>
 class topk_type_prop : public ::testing::Test
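// The deleted cases covered v0-only validation (v0-style constructor arguments, the
// indices-first output layout, and the "no default output" rule). Equivalent v1 coverage
// lives in the templated topk_type_prop suite that follows, which runs each check for
// both v1::TopK and the backward-compatible v3::TopK. A minimal sketch of the v1
// equivalent of the axis bounds check (illustrative only, not part of the patch):
//
//     auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2});
//     auto k = op::Constant::create(element::i64, Shape{}, {1});
//     // axis 2 is out of bounds for rank-2 data; v1::TopK is expected to reject it
//     EXPECT_THROW(make_shared<op::v1::TopK>(data, k, 2, op::v1::TopK::Mode::MAX,
//                                            op::v1::TopK::SortType::NONE),
//                  NodeValidationFailure);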