Extend nGraph for operation GatherND-5 and implement reference (#2587)
Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
This commit is contained in:
@@ -287,6 +287,7 @@ set(MULTI_TEST_SRC
|
||||
backend/function_name.in.cpp
|
||||
backend/fused_op.in.cpp
|
||||
backend/gather.in.cpp
|
||||
backend/gather_nd.in.cpp
|
||||
backend/gelu.in.cpp
|
||||
backend/group_convolution.in.cpp
|
||||
backend/interpolate.in.cpp
|
||||
|
||||
@@ -324,288 +324,6 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_scalar_indices_axis_1_2d_input)
|
||||
(vector<float>{1.0f, 2.0f, 3.0f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_single_indices)
{
    // A single full-rank index tuple selects one scalar from a 2-D tensor.
    const Shape data_shape{3, 3};
    const Shape idx_shape{2};
    const Shape result_shape{};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 2});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.5f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_2d)
{
    // Two full-rank index tuples each pick a scalar from a 2-D tensor.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 2};
    const Shape result_shape{2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.3f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_2d)
{
    // Partial (length-1) index tuples gather whole rows of a 2-D tensor.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_3d)
{
    // Full-rank (length-3) index tuples pick scalars from a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 3};
    const Shape result_shape{2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1, 0, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.1f, 2.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_3d)
{
    // Length-2 index tuples gather innermost 1-D slices of a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 1, 1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_2d_from_3d)
{
    // A single length-1 index tuple gathers a whole 2-D slice of a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{1, 1};
    const Shape result_shape{1, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_2d)
{
    // Batched full-rank index tuples (extra leading indices dim) pick scalars.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1, 2};
    const Shape result_shape{2, 1};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 0, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_2d)
{
    // Batched partial index tuples gather whole rows of a 2-D tensor.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1, 1};
    const Shape result_shape{2, 1, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_3d)
{
    // Batched full-rank (length-3) index tuples pick scalars from a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2, 3};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.1f, 2.1f, 1.3f, 2.2f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d)
{
    // Batched length-2 index tuples gather innermost 1-D slices of a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2, 2};
    const Shape result_shape{2, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 1, 1, 0, 0, 0, 1, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f, 1.0f, 1.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_2d_from_3d)
{
    // Batched length-1 index tuples gather whole 2-D slices of a 3-D tensor.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 1, 1};
    const Shape result_shape{2, 1, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f, 1.0f, 1.1f, 1.2f, 1.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_no_axis_int8)
|
||||
{
|
||||
Shape params_shape{3, 2};
|
||||
|
||||
494
ngraph/test/backend/gather_nd.in.cpp
Normal file
494
ngraph/test/backend/gather_nd.in.cpp
Normal file
@@ -0,0 +1,494 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#include <algorithm>
|
||||
#include <cinttypes>
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
#include <random>
|
||||
#include <string>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/runtime/tensor.hpp"
|
||||
#include "runtime/backend.hpp"
|
||||
#include "util/all_close.hpp"
|
||||
#include "util/all_close_f.hpp"
|
||||
#include "util/ndarray.hpp"
|
||||
#include "util/random.hpp"
|
||||
#include "util/test_case.hpp"
|
||||
#include "util/test_control.hpp"
|
||||
#include "util/test_tools.hpp"
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
static string s_manifest = "${MANIFEST}";
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_single_indices)
{
    // A single full-rank index tuple selects one scalar from a 2-D tensor;
    // checked for both the v0 and the opset-5 flavours of GatherND.
    const Shape data_shape{3, 3};
    const Shape idx_shape{2};
    const Shape result_shape{};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 2});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.5f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.5f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_2d)
{
    // Two full-rank index tuples each pick a scalar; checked for v0 and v5.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 2};
    const Shape result_shape{2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.3f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.3f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_2d)
{
    // Partial (length-1) index tuples gather whole rows; checked for v0 and v5.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_3d)
{
    // Full-rank (length-3) index tuples pick scalars from a 3-D tensor;
    // checked for v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 3};
    const Shape result_shape{2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1, 0, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.1f, 2.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.1f, 2.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_3d)
{
    // Length-2 index tuples gather innermost 1-D slices; checked for v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 1, 1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_2d_from_3d)
{
    // A single length-1 index tuple gathers a whole 2-D slice; checked for
    // v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{1, 1};
    const Shape result_shape{1, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_2d)
{
    // Batched full-rank index tuples pick scalars; checked for v0 and v5.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1, 2};
    const Shape result_shape{2, 1};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 0, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{1.0f, 1.1f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_2d)
{
    // Batched partial index tuples gather whole rows; checked for v0 and v5.
    const Shape data_shape{2, 2};
    const Shape idx_shape{2, 1, 1};
    const Shape result_shape{2, 1, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 1.0f, 1.1f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_3d)
{
    // Batched full-rank (length-3) index tuples pick scalars from a 3-D tensor;
    // checked for v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2, 3};
    const Shape result_shape{2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.1f, 2.1f, 1.3f, 2.2f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.1f, 2.1f, 1.3f, 2.2f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d)
{
    // Batched length-2 index tuples gather innermost 1-D slices; checked for
    // v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 2, 2};
    const Shape result_shape{2, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{0, 1, 1, 0, 0, 0, 1, 1});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f, 1.0f, 1.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{1.2f, 1.3f, 2.0f, 2.1f, 1.0f, 1.1f, 2.2f, 2.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_2d_from_3d)
{
    // Batched length-1 index tuples gather whole 2-D slices; checked for
    // v0 and v5.
    const Shape data_shape{2, 2, 2};
    const Shape idx_shape{2, 1, 1};
    const Shape result_shape{2, 1, 2, 2};

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::GatherND>(data, idx);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f, 1.0f, 1.1f, 1.2f, 1.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));

    // The opset-5 op with default batch_dims must reproduce the v0 result.
    const auto gather_v5 = make_shared<op::v5::GatherND>(data, idx);
    const auto func_v5 = make_shared<Function>(gather_v5, ParameterVector{data, idx});
    backend->compile(func_v5)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{2.0f, 2.1f, 2.2f, 2.3f, 1.0f, 1.1f, 1.2f, 1.3f}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims1)
{
    // Opset-5 GatherND with batch_dims == 1: each batch's index selects a row
    // from its own data slice independently.
    const Shape data_shape{2, 3, 4};
    const Shape idx_shape{2, 1};
    const Shape result_shape{2, 4};
    const int batch_dims = 1;

    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
    const auto idx = make_shared<op::Parameter>(element::i32, idx_shape);
    const auto gather = make_shared<op::v5::GatherND>(data, idx, batch_dims);
    const auto func = make_shared<Function>(gather, ParameterVector{data, idx});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Input/output tensors on the backend under test.
    auto data_t = backend->create_tensor(element::f32, data_shape);
    copy_data(data_t, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
                                    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    auto idx_t = backend->create_tensor(element::i32, idx_shape);
    copy_data(idx_t, vector<int32_t>{1, 0});
    auto result = backend->create_tensor(element::f32, result_shape);

    backend->compile(func)->call_with_validate({result}, {data_t, idx_t});
    EXPECT_TRUE(test::all_close_f((vector<float>{5, 6, 7, 8, 13, 14, 15, 16}),
                                  read_vector<float>(result),
                                  MIN_FLOAT_TOLERANCE_BITS));
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2)
|
||||
{
|
||||
Shape params_shape{2, 3, 4, 2};
|
||||
Shape indices_shape{2, 3, 3, 2};
|
||||
Shape out_shape{6, 3};
|
||||
int batch_dims = 2;
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::v5::GatherND>(P, I, batch_dims);
|
||||
auto f = make_shared<Function>(G, ParameterVector{P, I});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto p = backend->create_tensor(element::f32, params_shape);
|
||||
copy_data(p, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
|
||||
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
|
||||
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48});
|
||||
auto i = backend->create_tensor(element::i32, indices_shape);
|
||||
copy_data(i, vector<int32_t>{1, 0, 3, 1, 2, 1, 0, 1, 1, 1, 2, 0, 3, 0, 3, 1, 2, 1,
|
||||
2, 0, 1, 1, 3, 1, 1, 1, 2, 0, 2, 0, 0, 0, 3, 1, 3, 1});
|
||||
auto result = backend->create_tensor(element::f32, out_shape);
|
||||
|
||||
auto c = backend->compile(f);
|
||||
c->call_with_validate({result}, {p, i});
|
||||
EXPECT_TRUE(test::all_close_f(
|
||||
(vector<float>{3, 8, 6, 10, 12, 13, 23, 24, 22, 29, 28, 32, 36, 37, 37, 41, 48, 48}),
|
||||
read_vector<float>(result),
|
||||
MIN_FLOAT_TOLERANCE_BITS));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2_lead_dims)
|
||||
{
|
||||
Shape params_shape{2, 3, 4};
|
||||
Shape indices_shape{2, 3, 1, 1};
|
||||
Shape out_shape{6, 1};
|
||||
int batch_dims = 2;
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::v5::GatherND>(P, I, batch_dims);
|
||||
auto f = make_shared<Function>(G, ParameterVector{P, I});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto p = backend->create_tensor(element::f32, params_shape);
|
||||
copy_data(p, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
|
||||
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
|
||||
auto i = backend->create_tensor(element::i32, indices_shape);
|
||||
copy_data(i, vector<int32_t>{1, 0, 2, 0, 2, 2});
|
||||
auto result = backend->create_tensor(element::f32, out_shape);
|
||||
|
||||
auto c = backend->compile(f);
|
||||
c->call_with_validate({result}, {p, i});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{2, 5, 11, 13, 19, 23}),
|
||||
read_vector<float>(result),
|
||||
MIN_FLOAT_TOLERANCE_BITS));
|
||||
}
|
||||
@@ -787,6 +787,9 @@ gather_nd_batch_1d_from_2d
|
||||
gather_nd_batch_scalar_from_3d
|
||||
gather_nd_batch_1d_from_3d
|
||||
gather_nd_batch_2d_from_3d
|
||||
gather_nd_batch_dims1
|
||||
gather_nd_batch_dims2
|
||||
gather_nd_batch_dims2_lead_dims
|
||||
|
||||
# Cannot cast ngraph node Stack to CNNLayer!
|
||||
stack_matrix_rowise
|
||||
|
||||
@@ -712,6 +712,35 @@ protected:
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_TYPEID::GatherND_v5:
|
||||
{
|
||||
const op::v5::GatherND* gatherNDNode = static_cast<const op::v5::GatherND*>(&node);
|
||||
if (node.get_input_element_type(1) == element::i64)
|
||||
{
|
||||
reference::gather_nd<T, int64_t>(args[0]->get_data_ptr<T>(),
|
||||
args[1]->get_data_ptr<int64_t>(),
|
||||
out[0]->get_data_ptr<T>(),
|
||||
node.get_input_shape(0),
|
||||
node.get_input_shape(1),
|
||||
node.get_output_shape(0),
|
||||
gatherNDNode->get_batch_dims());
|
||||
}
|
||||
else if (node.get_input_element_type(1) == element::i32)
|
||||
{
|
||||
reference::gather_nd<T, int32_t>(args[0]->get_data_ptr<T>(),
|
||||
args[1]->get_data_ptr<int32_t>(),
|
||||
out[0]->get_data_ptr<T>(),
|
||||
node.get_input_shape(0),
|
||||
node.get_input_shape(1),
|
||||
node.get_output_shape(0),
|
||||
gatherNDNode->get_batch_dims());
|
||||
}
|
||||
else
|
||||
{
|
||||
throw ngraph_error("Unexpected type");
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_TYPEID::GRUCell_v3:
|
||||
{
|
||||
const op::v3::GRUCell* gru_cell = static_cast<const op::v3::GRUCell*>(&node);
|
||||
|
||||
@@ -51,6 +51,7 @@ NGRAPH_OP(LSTMCell, op::v4)
|
||||
#undef ID_SUFFIX
|
||||
|
||||
#define ID_SUFFIX(NAME) NAME##_v5
|
||||
NGRAPH_OP(GatherND, op::v5)
|
||||
NGRAPH_OP(LSTMSequence, op::v5)
|
||||
NGRAPH_OP(GRUSequence, op::v5)
|
||||
NGRAPH_OP(RNNSequence, op::v5)
|
||||
|
||||
@@ -18,11 +18,160 @@
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
// ------------------------------ V5 ------------------------------
|
||||
|
||||
TEST(type_prop, gather_nd_slices_from_4d_batch_dims0)
|
||||
{
|
||||
Shape params_shape{2, 3, 11, 12};
|
||||
Shape indices_shape{2, 3, 2};
|
||||
Shape out_shape{2, 3, 11, 12};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 0);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_scalars_from_4d_batch_dims2)
|
||||
{
|
||||
Shape params_shape{2, 3, 11, 12};
|
||||
Shape indices_shape{2, 3, 2};
|
||||
Shape out_shape{6};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_slices_from_5d_batch_dims2)
|
||||
{
|
||||
Shape params_shape{7, 5, 11, 12, 32};
|
||||
Shape indices_shape{7, 5, 3, 1};
|
||||
Shape out_shape{35, 3, 12, 32};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim)
|
||||
{
|
||||
PartialShape params_shape{7, Dimension::dynamic(), 11, 12, 32};
|
||||
Shape indices_shape{7, 5, 3, 1};
|
||||
Shape out_shape{35, 3, 12, 32};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim2)
|
||||
{
|
||||
PartialShape params_shape{7, Dimension::dynamic(), Dimension::dynamic(), 12, 32};
|
||||
Shape indices_shape{7, 5, 3, 1};
|
||||
Shape out_shape{35, 3, 12, 32};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim3)
|
||||
{
|
||||
PartialShape params_shape{
|
||||
7, Dimension::dynamic(), Dimension::dynamic(), 12, Dimension::dynamic()};
|
||||
Shape indices_shape{7, 5, 3, 1};
|
||||
PartialShape out_shape{35, 3, 12, Dimension::dynamic()};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_TRUE(G5->get_output_partial_shape(0).same_scheme(out_shape));
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_batch_dims_greater_indices_rank)
|
||||
{
|
||||
Shape params_shape{2, 3, 4, 5};
|
||||
Shape indices_shape{2, 1};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 3);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices rank";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(
|
||||
error.what(),
|
||||
std::string("Number of batch dimensions must not exceed a rank of indices."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_unequal_batch_dims)
|
||||
{
|
||||
Shape params_shape{2, 3, 4, 5};
|
||||
Shape indices_shape{2, 1, 4};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices rank";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(),
|
||||
std::string("Batch dimensions of data and indices must be the same."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_indices_tuple_greater_data_rank_batch_dims2)
|
||||
{
|
||||
Shape params_shape{2, 1, 4, 5};
|
||||
Shape indices_shape{2, 1, 5, 3};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I, 2);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices rank";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(),
|
||||
std::string("Length of a tuple with indices must not exceed a rank of "
|
||||
"data tensor excluding batch dimensions."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------ V0 + V5 ------------------------------
|
||||
|
||||
TEST(type_prop, gather_nd_scalar_from_2d)
|
||||
{
|
||||
Shape params_shape{2, 2};
|
||||
@@ -30,9 +179,16 @@ TEST(type_prop, gather_nd_scalar_from_2d)
|
||||
Shape out_shape{2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_1d_from_2d)
|
||||
@@ -42,9 +198,16 @@ TEST(type_prop, gather_nd_1d_from_2d)
|
||||
Shape out_shape{2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_scalar_from_3d)
|
||||
@@ -54,9 +217,16 @@ TEST(type_prop, gather_nd_scalar_from_3d)
|
||||
Shape out_shape{2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_1d_from_3d)
|
||||
@@ -66,9 +236,16 @@ TEST(type_prop, gather_nd_1d_from_3d)
|
||||
Shape out_shape{2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_2d_from_3d)
|
||||
@@ -78,9 +255,16 @@ TEST(type_prop, gather_nd_2d_from_3d)
|
||||
Shape out_shape{1, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_scalar_from_2d)
|
||||
@@ -90,9 +274,16 @@ TEST(type_prop, gather_nd_batch_scalar_from_2d)
|
||||
Shape out_shape{2, 1};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_1d_from_2d)
|
||||
@@ -102,9 +293,16 @@ TEST(type_prop, gather_nd_batch_1d_from_2d)
|
||||
Shape out_shape{2, 1, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_scalar_from_3d)
|
||||
@@ -114,9 +312,16 @@ TEST(type_prop, gather_nd_batch_scalar_from_3d)
|
||||
Shape out_shape{2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_1d_from_3d)
|
||||
@@ -126,9 +331,16 @@ TEST(type_prop, gather_nd_batch_1d_from_3d)
|
||||
Shape out_shape{2, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_batch_2d_from_3d)
|
||||
@@ -138,9 +350,16 @@ TEST(type_prop, gather_nd_batch_2d_from_3d)
|
||||
Shape out_shape{2, 1, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
ASSERT_EQ(G->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G->get_shape(), out_shape);
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
ASSERT_EQ(G5->get_element_type(), element::f32);
|
||||
ASSERT_EQ(G5->get_shape(), out_shape);
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_params_rank)
|
||||
@@ -150,9 +369,11 @@ TEST(type_prop, gather_nd_fail_params_rank)
|
||||
Shape out_shape{2, 1, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
try
|
||||
{
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect params rank";
|
||||
}
|
||||
@@ -164,6 +385,22 @@ TEST(type_prop, gather_nd_fail_params_rank)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect params rank";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(), std::string("Data rank must be at least 1."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_indices_rank)
|
||||
@@ -173,9 +410,11 @@ TEST(type_prop, gather_nd_fail_indices_rank)
|
||||
Shape out_shape{2, 1, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i32, indices_shape);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
try
|
||||
{
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices rank";
|
||||
}
|
||||
@@ -188,6 +427,22 @@ TEST(type_prop, gather_nd_fail_indices_rank)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices rank";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(), std::string("Indices rank must be at least 1."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, gather_nd_fail_indices_element_type)
|
||||
@@ -196,10 +451,12 @@ TEST(type_prop, gather_nd_fail_indices_element_type)
|
||||
Shape indices_shape{2, 1, 1};
|
||||
Shape out_shape{2, 1, 2, 2};
|
||||
auto P = make_shared<op::Parameter>(element::f32, params_shape);
|
||||
auto I = make_shared<op::Parameter>(element::i16, indices_shape);
|
||||
auto I = make_shared<op::Parameter>(element::f32, indices_shape);
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
try
|
||||
{
|
||||
auto G = make_shared<op::GatherND>(P, I);
|
||||
auto G = make_shared<op::v0::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices element type";
|
||||
}
|
||||
@@ -211,4 +468,21 @@ TEST(type_prop, gather_nd_fail_indices_element_type)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
|
||||
try
|
||||
{
|
||||
auto G5 = make_shared<op::v5::GatherND>(P, I);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Incorrect indices element type";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(),
|
||||
std::string("The indices type is expected to be an integer type."));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user