[IE Tests] Added NormalizeL2 tests (#2327)
* Added NormalizeL2 tests
* Added NormalizeL2 reference
* Added nGraph tests
* Fixed tests
* Added NormalizeL2 builder
parent 8062f20c15
commit 7f78dd797e
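For context (this note is not part of the diff): NormalizeL2 divides each element by the L2 norm taken over the reduction axes, with eps applied according to eps_mode, which is exactly what the reference kernel added below computes:

    \mathrm{out}_i = \frac{x_i}{\sqrt{\sum_{j \in S(i)} x_j^2 + \varepsilon}} \quad (\text{EpsMode::ADD}), \qquad
    \mathrm{out}_i = \frac{x_i}{\sqrt{\max\left(\sum_{j \in S(i)} x_j^2,\ \varepsilon\right)}} \quad (\text{EpsMode::MAX})

where S(i) is the slice of the input, selected by the reduction axes, that contains element i; an empty axes list normalizes over the whole tensor.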
@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/normalize_l2.hpp"

using namespace LayerTestsDefinitions;

namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP16
};

const std::vector<std::vector<int64_t>> axes = {
        {},
        {1},
};
const std::vector<float> eps = {1e-7f, 1e-6f, 1e-5f, 1e-4f};

const std::vector<ngraph::op::EpsMode> epsMode = {
        ngraph::op::EpsMode::ADD,
        ngraph::op::EpsMode::MAX,
};

const auto normL2params = testing::Combine(
        testing::ValuesIn(axes),
        testing::ValuesIn(eps),
        testing::ValuesIn(epsMode),
        testing::Values(std::vector<size_t>{1, 3, 10, 5}),
        testing::ValuesIn(netPrecisions),
        testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
        NormalizeL2,
        NormalizeL2LayerTest,
        normL2params,
        NormalizeL2LayerTest::getTestCaseName
);
} // namespace
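A quick coverage note (not part of the diff): testing::Combine registers the full cross product of the value lists above, so this CPU instantiation yields

    2_{\text{axes}} \times 4_{\varepsilon} \times 2_{\text{eps\_mode}} \times 1_{\text{shape}} \times 2_{\text{precision}} \times 1_{\text{device}} = 32

parameterized test cases.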
@ -0,0 +1,35 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <string>
#include <tuple>
#include <vector>

#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {

using NormalizeL2LayerTestParams = std::tuple<
        std::vector<int64_t>,         // axes
        float,                        // eps
        ngraph::op::EpsMode,          // eps_mode
        InferenceEngine::SizeVector,  // inputShape
        InferenceEngine::Precision,   // netPrecision
        std::string                   // targetDevice
>;

class NormalizeL2LayerTest : public testing::WithParamInterface<NormalizeL2LayerTestParams>,
                             virtual public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<NormalizeL2LayerTestParams> obj);

protected:
    void SetUp() override;
};

} // namespace LayerTestsDefinitions
@ -0,0 +1,47 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/normalize_l2.hpp"

namespace LayerTestsDefinitions {

std::string NormalizeL2LayerTest::getTestCaseName(testing::TestParamInfo<NormalizeL2LayerTestParams> obj) {
    std::vector<int64_t> axes;
    float eps;
    ngraph::op::EpsMode epsMode;
    InferenceEngine::SizeVector inputShape;
    InferenceEngine::Precision netPrecision;
    std::string targetDevice;
    std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = obj.param;

    std::ostringstream result;
    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
    result << "axes=" << CommonTestUtils::vec2str(axes) << "_";
    result << "eps=" << eps << "_";
    result << "epsMode=" << epsMode << "_";
    result << "netPRC=" << netPrecision.name() << "_";
    result << "targetDevice=" << targetDevice;
    return result.str();
}

void NormalizeL2LayerTest::SetUp() {
    InferenceEngine::SizeVector inputShape;
    std::vector<int64_t> axes;
    float eps;
    ngraph::op::EpsMode epsMode;
    InferenceEngine::Precision netPrecision;
    std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = this->GetParam();
    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
    auto norm = ngraph::builder::makeNormalizeL2(params[0], axes, eps, epsMode);
    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(norm)};
    function = std::make_shared<ngraph::Function>(results, params, "NormalizeL2");
}

TEST_P(NormalizeL2LayerTest, CompareWithRefs) {
    Run();
}

} // namespace LayerTestsDefinitions
@ -428,5 +428,10 @@ std::shared_ptr<ngraph::Node> makeRNN(const OutputVector& in,

std::shared_ptr<ngraph::Node> makeTile(const ngraph::Output<Node>& in,
                                       const std::vector<size_t>& repeats);

std::shared_ptr<ngraph::Node> makeNormalizeL2(const ngraph::Output<Node>& data,
                                              const std::vector<int64_t>& axes,
                                              float eps,
                                              ngraph::op::EpsMode epsMode);
} // namespace builder
} // namespace ngraph
inference-engine/tests/ngraph_functions/src/normalize_l2.cpp (Normal file, 20 lines)
@ -0,0 +1,20 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <memory>

#include "ngraph_functions/builders.hpp"

namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeNormalizeL2(const ngraph::Output<Node>& data,
                                              const std::vector<int64_t>& axes,
                                              float eps,
                                              ngraph::op::EpsMode epsMode) {
    auto normAxes = std::make_shared<ngraph::opset4::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes);
    return std::make_shared<ngraph::opset4::NormalizeL2>(data, normAxes, eps, epsMode);
}
} // namespace builder
} // namespace ngraph
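A minimal usage sketch of the new builder (an illustration, not part of the commit; the helper name buildNormalizeL2Example is hypothetical), mirroring how NormalizeL2LayerTest::SetUp consumes it:

#include <memory>
#include <vector>

#include "ngraph_functions/builders.hpp"

// Hypothetical helper: builds a small NormalizeL2 function over a 1x3x10x5 input,
// normalizing across axis 1 with eps added to the sum of squares (ADD mode).
std::shared_ptr<ngraph::Function> buildNormalizeL2Example() {
    auto params = ngraph::builder::makeParams(ngraph::element::f32, {{1, 3, 10, 5}});
    auto norm = ngraph::builder::makeNormalizeL2(params[0], {1}, 1e-6f, ngraph::op::EpsMode::ADD);
    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(norm)};
    return std::make_shared<ngraph::Function>(results, params, "NormalizeL2Example");
}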
@ -0,0 +1,69 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

// Headers for Shape/AxisSet/EpsMode and the sum/autobroadcast_binop helpers used below.
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

#include "ngraph/axis_set.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/runtime/reference/autobroadcast_binop.hpp"
#include "ngraph/runtime/reference/sum.hpp"
#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            template <typename T>
            void normalize_l2(const T* data,
                              T* out,
                              const Shape& data_shape,
                              const AxisSet& reduction_axes,
                              float eps,
                              op::EpsMode eps_mode)
            {
                // An empty axis set means "reduce over all axes".
                AxisSet axes = reduction_axes;
                if (reduction_axes.empty())
                {
                    std::vector<size_t> axes_vec(data_shape.size());
                    std::iota(axes_vec.begin(), axes_vec.end(), 0);
                    axes = AxisSet(axes_vec);
                }
                // Square every element.
                std::vector<T> sqr_data(shape_size(data_shape));
                for (size_t i = 0; i < shape_size(data_shape); i++)
                {
                    sqr_data[i] = data[i] * data[i];
                }

                // Shape of the kept-dims sum of squares along the reduction axes.
                Shape reduce_shape = data_shape;
                for (auto axis : axes)
                {
                    reduce_shape[axis] = 1;
                }

                // Sum the squares, then divide each element by sqrt(sum + eps) in ADD mode
                // or sqrt(max(sum, eps)) in MAX mode, broadcasting the sums back over the
                // reduced axes.
                std::vector<T> sum_data(shape_size(reduce_shape));
                sum(sqr_data.data(), sum_data.data(), data_shape, axes, true);
                autobroadcast_binop(data,
                                    sum_data.data(),
                                    out,
                                    data_shape,
                                    reduce_shape,
                                    op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY),
                                    [&eps, &eps_mode](T x, T y) -> T {
                                        T arg = (eps_mode == op::EpsMode::ADD)
                                                    ? y + eps
                                                    : std::max(y, static_cast<T>(eps));
                                        return x / std::sqrt(arg);
                                    });
            }
        } // namespace reference
    }     // namespace runtime
} // namespace ngraph
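A small, self-contained sketch (not part of the commit) of how this reference kernel behaves, using the same 2x2 input as the backend tests below; with an empty axis set the whole tensor is normalized:

#include <iostream>
#include <vector>

#include "ngraph/runtime/reference/normalize_l2.hpp"

int main()
{
    std::vector<float> data{1, 2, 3, 4};
    std::vector<float> out(data.size());
    // Empty reduction axes -> normalize over the whole tensor, as in normalize_l2_none_mode_add.
    ngraph::runtime::reference::normalize_l2<float>(data.data(),
                                                    out.data(),
                                                    ngraph::Shape{2, 2},
                                                    ngraph::AxisSet{},
                                                    1e-7f,
                                                    ngraph::op::EpsMode::ADD);
    for (float v : out)
    {
        std::cout << v << " "; // expected ~0.1826 0.3651 0.5477 0.7303
    }
    std::cout << "\n";
    return 0;
}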
@ -303,6 +303,7 @@ set(MULTI_TEST_SRC
    backend/multiply.in.cpp
    backend/negative.in.cpp
    backend/node_name.in.cpp
    backend/normalize_l2.in.cpp
    backend/not.in.cpp
    backend/non_zero.in.cpp
    backend/numeric.in.cpp
@ -502,9 +502,13 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input)

    test_case.add_input<float>(input_data);

    // output should be filled with 1f values
    test_case.add_expected_output<float>(data_shape, vector<float>(shape_size(data_shape), 1));
    test_case.add_expected_output<float>(
        data_shape,
        vector<float>{0.01428571, 0.02857143, 0.04285714, 0.05714286, 0.07142857, 0.08571429,
                      0.1,        0.11428571, 0.12857144, 0.14285715, 0.15714286, 0.17142858,

                      0.18571429, 0.2,        0.21428572, 0.22857143, 0.24285714, 0.25714287,
                      0.27142859, 0.2857143,  0.3,        0.31428573, 0.32857144, 0.34285715});
    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
}
ngraph/test/backend/normalize_l2.in.cpp (Normal file, 225 lines)
@ -0,0 +1,225 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <random>
#include <string>

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

// ----------------------- eps_mode = ngraph::op::EpsMode::ADD ----------------------- //

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257418, 0.36514837, 0.5477226, 0.73029673}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257418, 0.36514837, 0.5477226, 0.73029673}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.31622776, 0.4472136, 0.94868326, 0.8944272}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::ADD),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.4472136, 0.8944272, 0.6, 0.8}),
                                  read_vector<float>(result)));
}

// ----------------------- eps_mode = ngraph::op::EpsMode::MAX ----------------------- //

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257419, 0.36514837, 0.54772256, 0.73029674}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.18257419, 0.36514837, 0.54772256, 0.7302967}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.31622777, 0.4472136, 0.9486833, 0.89442719}),
                                  read_vector<float>(result)));
}

NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
    float eps = 1e-7;
    auto f = make_shared<Function>(
        make_shared<op::v0::NormalizeL2>(A, axes, eps, ngraph::op::EpsMode::MAX),
        ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_TRUE(test::all_close_f((vector<float>{0.4472136, 0.89442719, 0.6, 0.8}),
                                  read_vector<float>(result)));
}
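As a quick sanity check of the expected values above (not part of the diff), normalizing the rows of [[1, 2], [3, 4]] along axis 1 gives

    \left[\tfrac{1}{\sqrt{1^2+2^2}},\ \tfrac{2}{\sqrt{1^2+2^2}},\ \tfrac{3}{\sqrt{3^2+4^2}},\ \tfrac{4}{\sqrt{3^2+4^2}}\right]
    = \left[\tfrac{1}{\sqrt{5}},\ \tfrac{2}{\sqrt{5}},\ \tfrac{3}{5},\ \tfrac{4}{5}\right]
    \approx [0.4472,\ 0.8944,\ 0.6,\ 0.8],

matching the normalize_l2_one_mode_* expectations; with eps = 1e-7 the ADD and MAX variants agree to well within the tested tolerance.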
@ -336,6 +336,12 @@ max_3d_eliminate_zero_dim
lrn_across_empty
lrn_2d_across_empty
normalize_across_empty_axes_input
normalize_l2_all_mode_add
normalize_l2_none_mode_add
normalize_l2_zero_mode_add
normalize_l2_all_mode_max
normalize_l2_none_mode_max
normalize_l2_zero_mode_max
squeeze_default_axes
dynamic_abc
broadcast_v1
@ -1288,6 +1294,12 @@ IE_GPU.node_name
IE_GPU.negative
IE_GPU.negative_i32
IE_GPU.negative_f32
IE_GPU.normalize_l2_all_mode_add
IE_GPU.normalize_l2_none_mode_add
IE_GPU.normalize_l2_zero_mode_add
IE_GPU.normalize_l2_all_mode_max
IE_GPU.normalize_l2_none_mode_max
IE_GPU.normalize_l2_zero_mode_max
IE_GPU.multiply
IE_GPU.multiply_overload
IE_GPU.multiple_backends
@ -67,8 +67,9 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& f
    {
    case OP_TYPEID::Clamp:
    case OP_TYPEID::MatMul:
    case OP_TYPEID::NormalizeL2:
    case OP_TYPEID::PRelu:
    case OP_TYPEID::Squeeze:
    case OP_TYPEID::Unsqueeze: retval = true; break;
    default: break;
    }
@ -70,6 +70,7 @@
#include "ngraph/runtime/reference/max_pool.hpp"
#include "ngraph/runtime/reference/min.hpp"
#include "ngraph/runtime/reference/negate.hpp"
#include "ngraph/runtime/reference/normalize_l2.hpp"
#include "ngraph/runtime/reference/not.hpp"
#include "ngraph/runtime/reference/one_hot.hpp"
#include "ngraph/runtime/reference/pad.hpp"
@ -1372,6 +1373,17 @@ protected:
                                     args[1]->get_element_type());
                break;
            }
            case OP_TYPEID::NormalizeL2:
            {
                const op::NormalizeL2* norm = static_cast<const op::NormalizeL2*>(&node);
                reference::normalize_l2<T>(args[0]->get_data_ptr<const T>(),
                                           out[0]->get_data_ptr<T>(),
                                           node.get_input_shape(0),
                                           norm->get_reduction_axes(),
                                           norm->get_eps(),
                                           norm->get_eps_mode());
                break;
            }

            // Fused Ops are not supported in interpreter. They need to be decomposed before execution
            case OP_TYPEID::DepthToSpace:
@ -1384,7 +1396,6 @@ protected:
            case OP_TYPEID::HardSigmoid:
            case OP_TYPEID::Interpolate:
            case OP_TYPEID::MVN:
            case OP_TYPEID::NormalizeL2:
            case OP_TYPEID::PRelu:
            case OP_TYPEID::ScatterUpdate_v3:
            case OP_TYPEID::Selu: