diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/normalize_l2.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/normalize_l2.cpp new file mode 100644 index 00000000000..0df1e6573cf --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/normalize_l2.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/normalize_l2.hpp" + +using namespace LayerTestsDefinitions; + +namespace { +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP16 +}; + +const std::vector> axes = { + {}, + {1}, +}; +const std::vector eps = {1e-7f, 1e-6f, 1e-5f, 1e-4f}; + +const std::vector epsMode = { + ngraph::op::EpsMode::ADD, + ngraph::op::EpsMode::MAX, +}; + +const auto normL2params = testing::Combine( + testing::ValuesIn(axes), + testing::ValuesIn(eps), + testing::ValuesIn(epsMode), + testing::Values(std::vector{1, 3, 10, 5}), + testing::ValuesIn(netPrecisions), + testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_CASE_P( + NormalizeL2, + NormalizeL2LayerTest, + normL2params, + NormalizeL2LayerTest::getTestCaseName +); +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp new file mode 100644 index 00000000000..8f867765fc2 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "functional_test_utils/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" + + +namespace LayerTestsDefinitions { + +using 
NormalizeL2LayerTestParams = std::tuple< + std::vector, // axes + float, // eps + ngraph::op::EpsMode, // eps_mode + InferenceEngine::SizeVector, // inputShape + InferenceEngine::Precision, // netPrecision + std::string // targetDevice +>; + +class NormalizeL2LayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/normalize_l2.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/normalize_l2.cpp new file mode 100644 index 00000000000..3d6c472e3fc --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/normalize_l2.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/normalize_l2.hpp" + + +namespace LayerTestsDefinitions { + +std::string NormalizeL2LayerTest::getTestCaseName(testing::TestParamInfo obj) { + std::vector axes; + float eps; + ngraph::op::EpsMode epsMode; + InferenceEngine::SizeVector inputShape; + InferenceEngine::Precision netPrecision; + std::string targetDevice; + std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = obj.param; + + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "axes=" << CommonTestUtils::vec2str(axes) << "_"; + result << "eps=" << eps << "_"; + result << "epsMode=" << epsMode << "_"; + result << "netPRC=" << netPrecision.name() << "_"; + result << "targetDevice=" << targetDevice; + return result.str(); +} + +void NormalizeL2LayerTest::SetUp() { + InferenceEngine::SizeVector inputShape; + std::vector axes; + float eps; + ngraph::op::EpsMode epsMode; + InferenceEngine::Precision netPrecision; + std::tie(axes, eps, epsMode, inputShape, 
netPrecision, targetDevice) = this->GetParam(); + auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + auto norm = ngraph::builder::makeNormalizeL2(params[0], axes, eps, epsMode); + ngraph::ResultVector results{std::make_shared(norm)}; + function = std::make_shared(results, params, "NormalizeL2"); +} + +TEST_P(NormalizeL2LayerTest, CompareWithRefs) { + Run(); +} + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp index 12b20cccdea..f912561ed5e 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp +++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp @@ -428,5 +428,10 @@ std::shared_ptr makeRNN(const OutputVector& in, std::shared_ptr makeTile(const ngraph::Output& in, const std::vector& repeats); + +std::shared_ptr makeNormalizeL2(const ngraph::Output& data, + const std::vector& axes, + float eps, + ngraph::op::EpsMode epsMode); } // namespace builder } // namespace ngraph diff --git a/inference-engine/tests/ngraph_functions/src/normalize_l2.cpp b/inference-engine/tests/ngraph_functions/src/normalize_l2.cpp new file mode 100644 index 00000000000..38c4c4a021e --- /dev/null +++ b/inference-engine/tests/ngraph_functions/src/normalize_l2.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include + +#include "ngraph_functions/builders.hpp" + +namespace ngraph { +namespace builder { +std::shared_ptr makeNormalizeL2(const ngraph::Output& data, + const std::vector& axes, + float eps, + ngraph::op::EpsMode epsMode) { + auto normAxes = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes); + return std::make_shared(data, normAxes, eps, epsMode); +} +} // namespace 
builder +} // namespace ngraph diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp new file mode 100644 index 00000000000..51f2ff12276 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/normalize_l2.hpp @@ -0,0 +1,69 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +namespace ngraph +{ + namespace runtime + { + namespace reference + { + template + void normalize_l2(const T* data, + T* out, + const Shape& data_shape, + const AxisSet& reduction_axes, + float eps, + op::EpsMode eps_mode) + { + AxisSet axes = reduction_axes; + if (reduction_axes.empty()) + { + std::vector axes_vec(data_shape.size()); + std::iota(axes_vec.begin(), axes_vec.end(), 0); + axes = AxisSet(axes_vec); + } + std::vector sqr_data(shape_size(data_shape)); + for (size_t i = 0; i < shape_size(data_shape); i++) + { + sqr_data[i] = data[i] * data[i]; + } + + Shape reduce_shape = data_shape; + for (auto axis : axes) + { + reduce_shape[axis] = 1; + } + + std::vector sum_data(shape_size(reduce_shape)); + sum(sqr_data.data(), sum_data.data(), data_shape, axes, true); + autobroadcast_binop(data, + sum_data.data(), + out, + data_shape, + reduce_shape, + op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY), + [&eps, &eps_mode](T x, T y) -> T { + T arg = (eps_mode == op::EpsMode::ADD) + ? 
y + eps + : std::max(y, static_cast(eps)); + return x / std::sqrt(arg); + }); + } + } // namespace reference + } // namespace runtime +} // namespace ngraph diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index ff8788674e3..253c1e97370 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -303,6 +303,7 @@ set(MULTI_TEST_SRC backend/multiply.in.cpp backend/negative.in.cpp backend/node_name.in.cpp + backend/normalize_l2.in.cpp backend/not.in.cpp backend/non_zero.in.cpp backend/numeric.in.cpp diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp index 7544b61afc9..d160686daf7 100644 --- a/ngraph/test/backend/fused_op.in.cpp +++ b/ngraph/test/backend/fused_op.in.cpp @@ -502,9 +502,13 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input) test_case.add_input(input_data); - // output should be filled with 1f values - test_case.add_expected_output(data_shape, vector(shape_size(data_shape), 1)); + test_case.add_expected_output( + data_shape, + vector{0.01428571, 0.02857143, 0.04285714, 0.05714286, 0.07142857, 0.08571429, + 0.1, 0.11428571, 0.12857144, 0.14285715, 0.15714286, 0.17142858, + 0.18571429, 0.2, 0.21428572, 0.22857143, 0.24285714, 0.25714287, + 0.27142859, 0.2857143, 0.3, 0.31428573, 0.32857144, 0.34285715}); test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1); } diff --git a/ngraph/test/backend/normalize_l2.in.cpp b/ngraph/test/backend/normalize_l2.in.cpp new file mode 100644 index 00000000000..77e0415e632 --- /dev/null +++ b/ngraph/test/backend/normalize_l2.in.cpp @@ -0,0 +1,225 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/ndarray.hpp" +#include "util/random.hpp" +#include "util/test_control.hpp" +#include "util/test_tools.hpp" + +using namespace std; +using namespace ngraph; + +static string s_manifest = "${MANIFEST}"; + +// ----------------------- eps_mode = ngraph::op::EpsMode::ADD ----------------------- // + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.18257418, 0.36514837, 0.5477226, 0.73029673}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{0}, vector{}); + float eps = 1e-7; + 
auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.18257418, 0.36514837, 0.5477226, 0.73029673}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{}, vector{0}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.31622776, 0.4472136, 0.94868326, 0.8944272}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{}, vector{1}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + 
EXPECT_TRUE(test::all_close_f((vector{0.4472136, 0.8944272, 0.6, 0.8}), + read_vector(result))); +} + +// ----------------------- eps_mode = ngraph::op::EpsMode::MAX ----------------------- // + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.18257419, 0.36514837, 0.54772256, 0.73029674}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{0}, vector{}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.18257419, 0.36514837, 0.54772256, 0.7302967}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{}, vector{0}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, 
axes, eps, ngraph::op::EpsMode::MAX), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.31622777, 0.4472136, 0.9486833, 0.89442719}), + read_vector(result))); +} + +NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max) +{ + Shape shape{2, 2}; + auto A = make_shared(element::f32, shape); + auto axes = make_shared(element::i64, Shape{}, vector{1}); + float eps = 1e-7; + auto f = make_shared( + make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), + ParameterVector{A}); + + auto backend = runtime::Backend::create("${BACKEND_NAME}"); + + // Create some tensors for input/output + auto a = backend->create_tensor(element::f32, shape); + copy_data(a, vector{1, 2, 3, 4}); + auto result = backend->create_tensor(element::f32, shape); + + auto handle = backend->compile(f); + handle->call_with_validate({result}, {a}); + EXPECT_TRUE(test::all_close_f((vector{0.4472136, 0.89442719, 0.6, 0.8}), + read_vector(result))); +} diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index 51e154a4f31..0ab40582e5a 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -336,6 +336,12 @@ max_3d_eliminate_zero_dim lrn_across_empty lrn_2d_across_empty normalize_across_empty_axes_input +normalize_l2_all_mode_add +normalize_l2_none_mode_add +normalize_l2_zero_mode_add +normalize_l2_all_mode_max +normalize_l2_none_mode_max +normalize_l2_zero_mode_max squeeze_default_axes dynamic_abc broadcast_v1 @@ -1288,6 +1294,12 @@ IE_GPU.node_name IE_GPU.negative IE_GPU.negative_i32 IE_GPU.negative_f32 +IE_GPU.normalize_l2_all_mode_add 
+IE_GPU.normalize_l2_none_mode_add +IE_GPU.normalize_l2_zero_mode_add +IE_GPU.normalize_l2_all_mode_max +IE_GPU.normalize_l2_none_mode_max +IE_GPU.normalize_l2_zero_mode_max IE_GPU.multiply IE_GPU.multiply_overload IE_GPU.multiple_backends diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp index d23f6c2bf23..9b54f73215f 100644 --- a/ngraph/test/runtime/interpreter/int_executable.cpp +++ b/ngraph/test/runtime/interpreter/int_executable.cpp @@ -67,8 +67,9 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr& f { case OP_TYPEID::Clamp: case OP_TYPEID::MatMul: - case OP_TYPEID::Squeeze: + case OP_TYPEID::NormalizeL2: case OP_TYPEID::PRelu: + case OP_TYPEID::Squeeze: case OP_TYPEID::Unsqueeze: retval = true; break; default: break; } diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index ac026ffc8cd..0bc9000f217 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -70,6 +70,7 @@ #include "ngraph/runtime/reference/max_pool.hpp" #include "ngraph/runtime/reference/min.hpp" #include "ngraph/runtime/reference/negate.hpp" +#include "ngraph/runtime/reference/normalize_l2.hpp" #include "ngraph/runtime/reference/not.hpp" #include "ngraph/runtime/reference/one_hot.hpp" #include "ngraph/runtime/reference/pad.hpp" @@ -1372,6 +1373,17 @@ protected: args[1]->get_element_type()); break; } + case OP_TYPEID::NormalizeL2: + { + const op::NormalizeL2* norm = static_cast(&node); + reference::normalize_l2(args[0]->get_data_ptr(), + out[0]->get_data_ptr(), + node.get_input_shape(0), + norm->get_reduction_axes(), + norm->get_eps(), + norm->get_eps_mode()); + break; + } // Fused Ops are not supported in interpreter. 
They need to be decomposed before execution case OP_TYPEID::DepthToSpace: @@ -1384,7 +1396,6 @@ protected: case OP_TYPEID::HardSigmoid: case OP_TYPEID::Interpolate: case OP_TYPEID::MVN: - case OP_TYPEID::NormalizeL2: case OP_TYPEID::PRelu: case OP_TYPEID::ScatterUpdate_v3: case OP_TYPEID::Selu: