[IE Tests] Added NormalizeL2 tests (#2327)

* Added NormalizeL2 tests

* Added NormalizeL2 reference

* Add nGraph tests

* Fix tests

* Added NormalizeL2 builder
Liubov Batanina, committed by GitHub on 2020-10-08 07:23:25 +03:00
parent 8062f20c15, commit 7f78dd797e
12 changed files with 477 additions and 4 deletions
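
For context on what the new tests check: NormalizeL2 divides each input element by the L2 norm computed over the given reduction axes, and eps enters the divisor in one of two ways selected by EpsMode. A minimal sketch of the divisor for one reduction group, assuming the OpenVINO operator semantics (the helper name is illustrative, not part of this change):

#include <algorithm>
#include <cmath>

// sum_sq is the sum of squares over the reduction axes.
// EpsMode::ADD -> divide by sqrt(sum_sq + eps)
// EpsMode::MAX -> divide by sqrt(max(sum_sq, eps))
float l2_divisor(float sum_sq, float eps, bool add_mode)
{
    return add_mode ? std::sqrt(sum_sq + eps) : std::sqrt(std::max(sum_sq, eps));
}

With the tiny eps = 1e-7 used throughout the tests below, both modes produce effectively the same outputs on the {1, 2, 3, 4} input; the tests differ mainly in which axes they normalize over.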

View File

@@ -303,6 +303,7 @@ set(MULTI_TEST_SRC
     backend/multiply.in.cpp
     backend/negative.in.cpp
     backend/node_name.in.cpp
+    backend/normalize_l2.in.cpp
     backend/not.in.cpp
     backend/non_zero.in.cpp
     backend/numeric.in.cpp

View File

@@ -502,9 +502,13 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input)
     test_case.add_input<float>(input_data);
-    // output should be filled with 1f values
-    test_case.add_expected_output<float>(data_shape, vector<float>(shape_size(data_shape), 1));
+    test_case.add_expected_output<float>(
+        data_shape,
+        vector<float>{0.01428571, 0.02857143, 0.04285714, 0.05714286, 0.07142857, 0.08571429,
+                      0.1, 0.11428571, 0.12857144, 0.14285715, 0.15714286, 0.17142858,
+                      0.18571429, 0.2, 0.21428572, 0.22857143, 0.24285714, 0.25714287,
+                      0.27142859, 0.2857143, 0.3, 0.31428573, 0.32857144, 0.34285715});
     test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
 }
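
The updated expectation follows from treating an empty axes input as normalization across all axes rather than a no-op: for input values 1..24, the sum of squares is 24 * 25 * 49 / 6 = 4900, so the L2 norm is 70 and each output element is k / 70 (1/70 ≈ 0.01428571, ..., 24/70 ≈ 0.34285715), which is exactly the vector above.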

View File

@@ -0,0 +1,225 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

// ----------------------- eps_mode = ngraph::op::EpsMode::ADD ----------------------- //
NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257418, 0.36514837, 0.5477226, 0.73029673}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257418, 0.36514837, 0.5477226, 0.73029673}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.31622776, 0.4472136, 0.94868326, 0.8944272}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.4472136, 0.8944272, 0.6, 0.8}),
                                  read_vector<float>(result)));
}
// ----------------------- eps_mode = ngraph::op::EpsMode::MAX ----------------------- //
NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257419, 0.36514837, 0.54772256, 0.73029674}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257419, 0.36514837, 0.54772256, 0.7302967}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.31622777, 0.4472136, 0.9486833, 0.89442719}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});
    auto backend = runtime::Backend::create("${BACKEND_NAME}");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.4472136, 0.89442719, 0.6, 0.8}),
                                  read_vector<float>(result)));
}
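
As a sanity check on the expected vectors above: the input is the 2x2 tensor {{1, 2}, {3, 4}}. With axes {0, 1} (and also with empty axes, per the new semantics) the whole tensor is divided by sqrt(1 + 4 + 9 + 16) = sqrt(30) ≈ 5.477, giving 0.18257, 0.36515, 0.54772, 0.73030. With axes {0} each column is normalized, so sqrt(10) and sqrt(20) yield 0.31623, 0.44721, 0.94868, 0.89443. With axes {1} each row is normalized, so sqrt(5) and sqrt(25) yield 0.44721, 0.89443, 0.6, 0.8.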

View File

@@ -336,6 +336,12 @@ max_3d_eliminate_zero_dim
 lrn_across_empty
 lrn_2d_across_empty
 normalize_across_empty_axes_input
+normalize_l2_all_mode_add
+normalize_l2_none_mode_add
+normalize_l2_zero_mode_add
+normalize_l2_all_mode_max
+normalize_l2_none_mode_max
+normalize_l2_zero_mode_max
 squeeze_default_axes
 dynamic_abc
 broadcast_v1
@@ -1288,6 +1294,12 @@ IE_GPU.node_name
 IE_GPU.negative
 IE_GPU.negative_i32
 IE_GPU.negative_f32
+IE_GPU.normalize_l2_all_mode_add
+IE_GPU.normalize_l2_none_mode_add
+IE_GPU.normalize_l2_zero_mode_add
+IE_GPU.normalize_l2_all_mode_max
+IE_GPU.normalize_l2_none_mode_max
+IE_GPU.normalize_l2_zero_mode_max
 IE_GPU.multiply
 IE_GPU.multiply_overload
 IE_GPU.multiple_backends
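
These manifest entries presumably disable the new tests on backends where NormalizeL2 is not yet supported; note that the one_mode variants are not listed, so they remain enabled everywhere.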

View File

@@ -67,8 +67,9 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& f
 {
     case OP_TYPEID::Clamp:
     case OP_TYPEID::MatMul:
-    case OP_TYPEID::Squeeze:
+    case OP_TYPEID::NormalizeL2:
     case OP_TYPEID::PRelu:
+    case OP_TYPEID::Squeeze:
     case OP_TYPEID::Unsqueeze: retval = true; break;
     default: break;
 }
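
This switch marks fused ops that INTExecutable evaluates directly instead of decomposing; adding OP_TYPEID::NormalizeL2 here (and re-sorting Squeeze) is what routes the op to the new reference kernel registered below.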

View File

@@ -70,6 +70,7 @@
#include "ngraph/runtime/reference/max_pool.hpp"
#include "ngraph/runtime/reference/min.hpp"
#include "ngraph/runtime/reference/negate.hpp"
#include "ngraph/runtime/reference/normalize_l2.hpp"
#include "ngraph/runtime/reference/not.hpp"
#include "ngraph/runtime/reference/one_hot.hpp"
#include "ngraph/runtime/reference/pad.hpp"
@@ -1372,6 +1373,17 @@ protected:
                              args[1]->get_element_type());
             break;
         }
+        case OP_TYPEID::NormalizeL2:
+        {
+            const op::NormalizeL2* norm = static_cast<const op::NormalizeL2*>(&node);
+            reference::normalize_l2<T>(args[0]->get_data_ptr<const T>(),
+                                       out[0]->get_data_ptr<T>(),
+                                       node.get_input_shape(0),
+                                       norm->get_reduction_axes(),
+                                       norm->get_eps(),
+                                       norm->get_eps_mode());
+            break;
+        }
         // Fused Ops are not supported in interpreter. They need to be decomposed before execution
         case OP_TYPEID::DepthToSpace:
@@ -1384,7 +1396,6 @@ protected:
         case OP_TYPEID::HardSigmoid:
         case OP_TYPEID::Interpolate:
         case OP_TYPEID::MVN:
-        case OP_TYPEID::NormalizeL2:
         case OP_TYPEID::PRelu:
         case OP_TYPEID::ScatterUpdate_v3:
         case OP_TYPEID::Selu:
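
The reference kernel itself (ngraph/runtime/reference/normalize_l2.hpp) is added by this commit but not shown in the diff. A minimal sketch consistent with the call site above, assuming the ADD/MAX eps semantics and the empty-axes behavior exercised by the tests (names and structure are assumptions, not the actual implementation):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>
#include "ngraph/axis_set.hpp"
#include "ngraph/op/normalize_l2.hpp" // op::EpsMode
#include "ngraph/shape.hpp"

template <typename T>
void normalize_l2_sketch(const T* in,
                         T* out,
                         const ngraph::Shape& shape,
                         const ngraph::AxisSet& axes,
                         float eps,
                         ngraph::op::EpsMode eps_mode)
{
    // The none_mode tests expect an empty axes list to normalize across the
    // whole tensor, so treat "no axes" as "all axes" (assumption).
    ngraph::AxisSet reduce = axes;
    if (reduce.empty())
    {
        for (size_t d = 0; d < shape.size(); ++d)
        {
            reduce.insert(d);
        }
    }
    const size_t count = ngraph::shape_size(shape);
    // Key each element by its coordinates with the reduced axes zeroed out,
    // so elements sharing a key belong to the same reduction group.
    std::vector<T> sum_sq(count, T(0));
    std::vector<size_t> group(count);
    for (size_t i = 0; i < count; ++i)
    {
        size_t rem = i, key = 0, stride = 1;
        for (size_t d = shape.size(); d-- > 0;)
        {
            const size_t coord = rem % shape[d];
            rem /= shape[d];
            if (reduce.count(d) == 0)
            {
                key += coord * stride;
            }
            stride *= shape[d];
        }
        group[i] = key;
        sum_sq[key] += in[i] * in[i];
    }
    for (size_t i = 0; i < count; ++i)
    {
        const T s = sum_sq[group[i]];
        const T divisor = (eps_mode == ngraph::op::EpsMode::ADD)
                              ? static_cast<T>(std::sqrt(s + eps))
                              : static_cast<T>(std::sqrt(std::max(s, static_cast<T>(eps))));
        out[i] = in[i] / divisor;
    }
}

Running this sketch on the tests' {1, 2, 3, 4} input with axes {0} reproduces the expected column-normalized values (0.31623, 0.44721, 0.94868, 0.89443), which is a quick way to cross-check it against the suite above.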