NormalizeL2 - reference implementation revision (#6349)

* Update SLT to generate input with zeros and an eps-sensitive threshold

* Fix normalize_l2 reference to enable empty axes case

* Simplify empty axes case

* Moved backend tests from fused_op

* Update SLT threshold

* Enable SLT for empty axes case

* More single layer tests

* Backend tests refactor

* Add helper function for normalize_l2 backend tests

* Rewrite NormalizeL2 backend tests to use test case and common helper function

* Update layer tests

* Cleanup unit-tests manifest

* Add more backend tests

* Update comments

* Fix applying eps in MKLDNN normalize

* Add 5D tests

* Backend tests refactor

* Remove duplicated tests

* Update IE tests manifest

* Add 5D SLT

* SLT cleanup

* Update skipped SLT for CPU

* Add NormalizeL2 to verified ops

* Skip GPU SLT for empty axes case

* Update SLT epsilon values

* Revert mkldnn_normalize changes

* Update tests and issue numbers

* Remove redundant axes copy from reference
Katarzyna Mitrus 2021-07-12 15:28:24 +02:00 committed by GitHub
parent 811f4c5ae9
commit b7cc01b8e2
10 changed files with 1053 additions and 397 deletions

View File

@@ -14,30 +14,162 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
 };
 
-const std::vector<std::vector<int64_t>> axes = {
-        {},
-        {1},
-};
-const std::vector<float> eps = {1e-7f, 1e-6f, 1e-5f, 1e-4f};
+const std::vector<float> eps = {1e-12f, 1e-6f, 1e-3f, 0.1, 100};
 
 const std::vector<ngraph::op::EpsMode> epsMode = {
         ngraph::op::EpsMode::ADD,
         ngraph::op::EpsMode::MAX,
 };
 
-const auto normL2params = testing::Combine(
-        testing::ValuesIn(axes),
+/* ============= 1D ============= */
+// [SKIPPED][CPU] Unsupported rank, Issue: 35627
+const std::vector<std::vector<int64_t>> axes_1D = {
+        {},
+        {0}
+};
+
+const auto normL2params_1D = testing::Combine(
+        testing::ValuesIn(axes_1D),
         testing::ValuesIn(eps),
         testing::ValuesIn(epsMode),
-        testing::ValuesIn(std::vector<std::vector<size_t>>({{1, 3, 10, 5}, {1, 5, 3}})),
+        testing::ValuesIn(std::vector<std::vector<size_t>>({{5}})),
         testing::ValuesIn(netPrecisions),
         testing::Values(CommonTestUtils::DEVICE_CPU)
 );
 
 INSTANTIATE_TEST_SUITE_P(
-        NormalizeL2,
+        smoke_NormalizeL2_1D,
         NormalizeL2LayerTest,
-        normL2params,
+        normL2params_1D,
+        NormalizeL2LayerTest::getTestCaseName
+);
+
+/* ============= 2D ============= */
+const std::vector<std::vector<int64_t>> axes_2D = {
+        {},
+        {1},
+        // [CPU] Unsupported axes, Issue: 59791
+        // {0},
+        // {0, 1},
+};
+
+const auto normL2params_2D = testing::Combine(
+        testing::ValuesIn(axes_2D),
+        testing::ValuesIn(eps),
+        testing::ValuesIn(epsMode),
+        testing::ValuesIn(std::vector<std::vector<size_t>>({{5, 3}})),
+        testing::ValuesIn(netPrecisions),
+        testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        smoke_NormalizeL2_2D,
+        NormalizeL2LayerTest,
+        normL2params_2D,
+        NormalizeL2LayerTest::getTestCaseName
+);
+
+/* ============= 3D ============= */
+const std::vector<std::vector<int64_t>> axes_3D = {
+        {},
+        {1},
+        {1, 2},
+        // [CPU] Unsorted axes, Issue: 59794
+        // {2, 1},
+        // [CPU] Unsupported axes, Issue: 59791
+        // {0},
+        // {2},
+        // {0, 1},
+        // {0, 1, 2}
+};
+
+const auto normL2params_3D = testing::Combine(
+        testing::ValuesIn(axes_3D),
+        testing::ValuesIn(eps),
+        testing::ValuesIn(epsMode),
+        testing::ValuesIn(std::vector<std::vector<size_t>>({{2, 5, 3}})),
+        testing::ValuesIn(netPrecisions),
+        testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        smoke_NormalizeL2_3D,
+        NormalizeL2LayerTest,
+        normL2params_3D,
+        NormalizeL2LayerTest::getTestCaseName
+);
+
+/* ============= 4D ============= */
+const std::vector<std::vector<int64_t>> axes_4D = {
+        {},
+        {1},
+        {1, 2, 3},
+        // [CPU] Unsorted axes, Issue: 59794
+        // {3, 1, 2},
+        // [CPU] Unsupported axes, Issue: 59791
+        // {0},
+        // {2},
+        // {3},
+        // {0, 1},
+        // {1, 2},
+        // {2, 3},
+        // {0, 1, 2, 3}
+};
+
+const auto normL2params_4D = testing::Combine(
+        testing::ValuesIn(axes_4D),
+        testing::ValuesIn(eps),
+        testing::ValuesIn(epsMode),
+        testing::ValuesIn(std::vector<std::vector<size_t>>({{2, 3, 10, 5}})),
+        testing::ValuesIn(netPrecisions),
+        testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        smoke_NormalizeL2_4D,
+        NormalizeL2LayerTest,
+        normL2params_4D,
+        NormalizeL2LayerTest::getTestCaseName
+);
+
+/* ============= 5D ============= */
+// [SKIPPED][CPU] Unsupported rank, Issue: 35627
+const std::vector<std::vector<int64_t>> axes_5D = {
+        {},
+        {0},
+        {1},
+        {2},
+        {3},
+        {4},
+        {0, 1},
+        {1, 2},
+        {2, 3},
+        {3, 4},
+        {1, 2, 3},
+        {2, 3, 4},
+        {4, 3, 2},
+        {1, 2, 3, 4},
+        {0, 1, 2, 3}
+};
+
+const auto normL2params_5D = testing::Combine(
+        testing::ValuesIn(axes_5D),
+        testing::ValuesIn(eps),
+        testing::ValuesIn(epsMode),
+        testing::ValuesIn(std::vector<std::vector<size_t>>({{2, 2, 3, 10, 5}})),
+        testing::ValuesIn(netPrecisions),
+        testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_SUITE_P(
+        smoke_NormalizeL2_5D,
+        NormalizeL2LayerTest,
+        normL2params_5D,
         NormalizeL2LayerTest::getTestCaseName
 );
 }  // namespace

View File

@@ -51,14 +51,17 @@ std::vector<std::string> disabledTestPatterns() {
         R"(.*ConvolutionLayerCPUTest.*BF16.*_inFmts=(ndhwc|nhwc).*)",
         // TODO: 56827. Sporadic test failures
         R"(.*smoke_Conv.+_FP32.ConvolutionLayerCPUTest\.CompareWithRefs.IS=\(1\.67.+\).*inFmts=n.+c.*_primitive=jit_avx2.*)",
-        // incorrect reference implementation
-        R"(.*NormalizeL2LayerTest.*axes=\(\).*)",
         // lpt transformation produce the same names for MatMul and Multiply
         R"(.*MatMulTransformation.*)",
         // incorrect jit_uni_planar_convolution with dilation = {1, 2, 1} and output channel 1
         R"(.*smoke_Convolution3D.*D=\(1.2.1\)_O=1.*)",
+        // TODO: Issue: 35627. CPU Normalize supports from 2D to 4D blobs
+        R"(.*NormalizeL2_1D.*)",
+        R"(.*NormalizeL2_5D.*)",
+        // Issue: 59788. mkldnn_normalize_nchw applies eps after sqrt for across_spatial
+        R"(.*NormalizeL2_.*axes=\(1.2.*_eps=100.*)",
         // Unsupported operation of type: NormalizeL2 name : Doesn't support reduction axes: (2.2)
         R"(.*BF16NetworkRestore1.*)",
         R"(.*MobileNet_ssd_with_branching.*)",

View File

@@ -55,6 +55,9 @@ std::vector<std::string> disabledTestPatterns() {
         // TODO: Issue: 54194
         R"(.*ActivationLayerTest.*SoftPlus.*)",
         // need to implement Export / Import
-        R"(.*IEClassImportExportTestP.*)"
+        R"(.*IEClassImportExportTestP.*)",
+        // TODO: Issue: 59586, NormalizeL2 output mismatch for empty axes case
+        R"(.*NormalizeL2LayerTest.*axes=\(\).*)"
     };
 }

View File

@@ -29,7 +29,7 @@ public:
 protected:
     void SetUp() override;
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
 };
 }  // namespace LayerTestsDefinitions

View File

@@ -25,6 +25,17 @@ std::string NormalizeL2LayerTest::getTestCaseName(testing::TestParamInfo<Normali
     return result.str();
 }
 
+InferenceEngine::Blob::Ptr NormalizeL2LayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
+    InferenceEngine::Blob::Ptr blobPtr;
+    const std::string& name = info.name();
+    if (name == "data") {
+        blobPtr = FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 10, -5, 7, 222);
+    } else {
+        blobPtr = LayerTestsUtils::LayerTestsCommon::GenerateInput(info);
+    }
+    return blobPtr;
+}
+
 void NormalizeL2LayerTest::SetUp() {
     InferenceEngine::SizeVector inputShape;
     std::vector<int64_t> axes;
@@ -34,7 +45,9 @@ void NormalizeL2LayerTest::SetUp() {
     std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
-    auto norm = ngraph::builder::makeNormalizeL2(params[0], axes, eps, epsMode);
+    auto data_input = params[0];
+    data_input->set_friendly_name("data");
+    auto norm = ngraph::builder::makeNormalizeL2(data_input, axes, eps, epsMode);
     ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(norm)};
     function = std::make_shared<ngraph::Function>(results, params, "NormalizeL2");
 }
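The override routes the input named "data" through `createAndFillBlobFloat` so that the generated values include exact zeros and small magnitudes, which exercises both the empty-axes path and the eps handling. A hedged sketch of the kind of data this aims for; the argument meaning (range=10, start_from=-5, resolution=7, seed=222) is an assumption about the utility's signature, and `make_eps_sensitive_input` is a hypothetical stand-in, not the test utility itself:

#include <cstdio>
#include <random>
#include <vector>

// Hypothetical stand-in: multiples of 1/7 in [-5, 5], so exact zeros
// and small near-eps magnitudes both occur in the generated input.
std::vector<float> make_eps_sensitive_input(size_t n, unsigned seed = 222) {
    std::mt19937 gen(seed);
    std::uniform_int_distribution<int> dist(-35, 35);  // start_from=-5, range=10, resolution=7
    std::vector<float> data(n);
    for (auto& v : data)
        v = dist(gen) / 7.0f;
    return data;
}

int main() {
    for (float v : make_eps_sensitive_input(8))
        std::printf("%g ", v);
    std::printf("\n");
    return 0;
}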

View File

@@ -62,6 +62,7 @@ VERIFIED_OP_REFERENCES = [
     'NonMaxSuppression-4',
     'NonMaxSuppression-5',
     'NonZero-3',
+    'NormalizeL2-1',
     'PriorBox-1',
     'Proposal-1',
     'Proposal-4',

View File

@@ -22,27 +22,31 @@ namespace ngraph
                               float eps,
                               op::EpsMode eps_mode)
             {
-                AxisSet axes = reduction_axes;
                 if (reduction_axes.empty())
                 {
-                    std::vector<size_t> axes_vec(data_shape.size());
-                    std::iota(axes_vec.begin(), axes_vec.end(), 0);
-                    axes = AxisSet(axes_vec);
+                    // When axes is an empty list, each `data` element is divided
+                    // by itself, resulting in a value of 1 for all non-zero elements
+                    for (size_t i = 0; i < shape_size(data_shape); ++i)
+                    {
+                        out[i] = data[i] == 0 ? 0 : 1;
+                    }
+                    return;
                 }
 
                 std::vector<T> sqr_data(shape_size(data_shape));
-                for (size_t i = 0; i < shape_size(data_shape); i++)
+                for (size_t i = 0; i < shape_size(data_shape); ++i)
                 {
                     sqr_data[i] = data[i] * data[i];
                 }
 
                 Shape reduce_shape = data_shape;
-                for (auto axis : axes)
+                for (auto axis : reduction_axes)
                 {
                     reduce_shape[axis] = 1;
                 }
 
                 std::vector<T> sum_data(shape_size(reduce_shape));
-                sum(sqr_data.data(), sum_data.data(), data_shape, axes);
+                sum(sqr_data.data(), sum_data.data(), data_shape, reduction_axes);
                 autobroadcast_binop(data,
                                     sum_data.data(),
                                     out,
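The net effect of the new empty-axes branch: instead of silently normalizing over all axes (the old fallback), each element is divided by itself. A tiny standalone sketch of the resulting semantics, using plain std::vector in place of the templated buffers:

#include <cstdio>
#include <vector>

// Mirrors the empty-axes branch above: every non-zero element becomes 1,
// zeros stay 0; eps is not involved on this path.
std::vector<float> normalize_l2_empty_axes(const std::vector<float>& data) {
    std::vector<float> out(data.size());
    for (size_t i = 0; i < data.size(); ++i)
        out[i] = (data[i] == 0.0f) ? 0.0f : 1.0f;
    return out;
}

int main() {
    for (float v : normalize_l2_empty_axes({-2.5f, 0.0f, 7.0f}))
        std::printf("%g ", v);  // prints: 1 0 1
    std::printf("\n");
    return 0;
}

This is also why the SLT input generation now includes exact zeros: zero inputs are the only case where the empty-axes output differs from 1.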

View File

@@ -78,222 +78,6 @@ NGRAPH_TEST(${BACKEND_NAME}, hardsigmoid)
     test_case.run();
 }
 
-// TODO: Issue: 37521
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_chw_4d)
-{
-    Shape data_shape{1, 2, 3, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape, {0.01428571f, 0.02857143f, 0.04285714f, 0.05714286f, 0.07142857f, 0.08571429f,
-                     0.1f, 0.11428571f, 0.12857144f, 0.14285715f, 0.15714286f, 0.17142858f,
-                     0.18571429f, 0.2f, 0.21428572f, 0.22857143f, 0.24285714f, 0.25714287f,
-                     0.27142859f, 0.2857143f, 0.30000001f, 0.31428573f, 0.32857144f, 0.34285715f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_empty_axes_input)
-{
-    Shape data_shape{1, 2, 3, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{0}, vector<int64_t>{});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape,
-        vector<float>{0.01428571, 0.02857143, 0.04285714, 0.05714286, 0.07142857, 0.08571429,
-                      0.1, 0.11428571, 0.12857144, 0.14285715, 0.15714286, 0.17142858,
-                      0.18571429, 0.2, 0.21428572, 0.22857143, 0.24285714, 0.25714287,
-                      0.27142859, 0.2857143, 0.3, 0.31428573, 0.32857144, 0.34285715});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_h_4d)
-{
-    Shape data_shape{1, 2, 3, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape, {0.0766965f, 0.14142136f, 0.19611613f, 0.24253564f, 0.28216633f, 0.31622776f,
-                     0.34570536f, 0.37139067f, 0.39391932f, 0.41380295f, 0.43145549f, 0.44721359f,
-                     0.99705452f, 0.98994946f, 0.98058069f, 0.97014254f, 0.95936549f, 0.94868332f,
-                     0.93834311f, 0.92847669f, 0.91914505f, 0.91036648f, 0.90213418f, 0.89442718f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_1axis_5d)
-{
-    Shape data_shape{1, 2, 2, 2, 3};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{1}, vector<int64_t>{1});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape, {0.0766965f, 0.14142136f, 0.19611613f, 0.24253564f, 0.28216633f, 0.31622776f,
-                     0.34570536f, 0.37139067f, 0.39391932f, 0.41380295f, 0.43145549f, 0.44721359f,
-                     0.99705452f, 0.98994946f, 0.98058069f, 0.97014254f, 0.95936549f, 0.94868332f,
-                     0.93834311f, 0.92847669f, 0.91914505f, 0.91036648f, 0.90213418f, 0.89442718f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_123axes_5d)
-{
-    Shape data_shape{1, 2, 2, 2, 3};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape, {0.02638899f, 0.04956816f, 0.070014f, 0.10555596f, 0.1239204f, 0.140028f,
-                     0.18472293f, 0.19827265f, 0.210042f, 0.26388991f, 0.27262488f, 0.280056f,
-                     0.34305686f, 0.34697714f, 0.35007f, 0.42222384f, 0.42132938f, 0.420084f,
-                     0.50139081f, 0.49568161f, 0.49009803f, 0.58055776f, 0.57003385f, 0.560112f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_c_2x2_shape)
-{
-    Shape data_shape{2, 2};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(data_shape,
-                                         {0.44721353f, 0.89442706f, 0.60000002f, 0.80000001f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_c_2x4_shape)
-{
-    Shape data_shape{2, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{1});
-    float eps{1e-6f};
-    auto eps_mode = op::EpsMode::ADD;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(data_shape,
-                                         {0.18257418f,
-                                          0.36514837f,
-                                          0.54772252f,
-                                          0.73029673f,
-                                          0.37904903f,
-                                          0.45485884f,
-                                          0.53066862f,
-                                          0.60647845f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, DISABLED_normalize_across_chw_4d_max_bias)
-{
-    Shape data_shape{1, 2, 3, 4};
-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    const auto axes = make_shared<op::Constant>(element::i64, Shape{3}, vector<int64_t>{1, 2, 3});
-    float eps{5000};
-    auto eps_mode = op::EpsMode::MAX;
-
-    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes, eps, eps_mode);
-    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-
-    vector<float> input_data(shape_size(data_shape));
-    iota(begin(input_data), end(input_data), 1);
-
-    test_case.add_input<float>(input_data);
-
-    test_case.add_expected_output<float>(
-        data_shape, {0.01414214f, 0.02828427f, 0.04242641f, 0.05656854f, 0.07071068f, 0.08485281f,
-                     0.09899495f, 0.11313709f, 0.12727922f, 0.14142136f, 0.15556349f, 0.16970563f,
-                     0.18384777f, 0.1979899f, 0.21213204f, 0.22627418f, 0.2404163f, 0.25455844f,
-                     0.26870057f, 0.28284273f, 0.29698485f, 0.31112698f, 0.32526913f, 0.33941126f});
-
-    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization)
 {
     Shape data_shape{1, 2, 5};

File diff suppressed because it is too large
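The suppressed diff is the rewritten backend test file. Based on the commit messages above ("Add helper function for normalize_l2 backend tests", "Rewrite NormalizeL2 backend tests to use test case and common helper function"), a hypothetical sketch of such a helper in the style of the removed tests; the name and exact signature are assumptions, not the actual file contents:

// Assumes the same headers and using-declarations as the removed tests
// above (test::TestCase, TestEngine, make_shared, vector, iota, ...).
template <typename T = float>
void normalize_l2_test(const Shape& data_shape,
                       const std::vector<int64_t>& axes,
                       op::EpsMode eps_mode,
                       float eps,
                       const std::vector<T>& expected_output)
{
    auto data = make_shared<op::Parameter>(element::from<T>(), data_shape);
    auto axes_input = make_shared<op::Constant>(element::i64, Shape{axes.size()}, axes);
    auto normalize = make_shared<op::v0::NormalizeL2>(data, axes_input, eps, eps_mode);
    auto function = make_shared<Function>(NodeVector{normalize}, ParameterVector{data});

    auto test_case = test::TestCase<TestEngine>(function);

    // Same iota-style input the removed tests used: 1, 2, 3, ...
    vector<T> input_data(shape_size(data_shape));
    iota(begin(input_data), end(input_data), T{1});

    test_case.add_input<T>(input_data);
    test_case.add_expected_output<T>(data_shape, expected_output);
    test_case.run(DEFAULT_FLOAT_TOLERANCE_BITS + 1);
}

A helper like this collapses each of the removed per-axes tests into a one-line call parameterized by shape, axes, eps mode, and expected values.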

View File

@@ -339,13 +339,7 @@ max_matrix_to_scalar_zero_by_zero
 max_3d_eliminate_zero_dim
 lrn_across_empty
 lrn_2d_across_empty
-normalize_across_empty_axes_input
-normalize_l2_all_mode_add
-normalize_l2_none_mode_add
-normalize_l2_zero_mode_add
-normalize_l2_all_mode_max
-normalize_l2_none_mode_max
-normalize_l2_zero_mode_max
 squeeze_default_axes
 dynamic_abc
 broadcast_v1
@@ -426,7 +420,6 @@ lrn_across_all_dims
 elu
 elu_negative_alpha
 max_pool_2d_1channel_1image_overpadded
-normalize_across_chw_4d_max_bias
 grn_2d_with_bias
 erf
 divide_adjoint_stability
@@ -676,10 +669,6 @@ conv_bias_bprop_2d
 # Cannot cast ngraph node ConvolutionBiasAdd to CNNLayer!
 conv_bias_add_2d
 
-# [Validation] Argument must have rank >= 2 and <= 4 (argument shape: {1,2,2,2,3})
-normalize_across_1axis_5d
-normalize_across_123axes_5d
-
 # Unsupported operator detected in the graph.
 gemm
 gemm_C
@@ -917,6 +906,56 @@ non_zero
 non_zero_all_1s
 non_zero_all_0s
 
+# NormalizeL2 - output mismatch,
+# mkldnn_normalize_nchw applies eps after sqrt for across_spatial
+# Issue: 59586
+IE_CPU.normalize_l2_4D_axes_123_big_eps_max
+IE_CPU.normalize_l2_4D_axes_123_big_eps_add
+
+# NormalizeL2 - unsorted axes are not supported,
+# message: "Doesn't support reduction axes: (3.1.2)"
+# Issue: 59794
+IE_CPU.normalize_l2_4D_axes_unsorted_312_max
+IE_CPU.normalize_l2_4D_axes_unsorted_312_add
+
+# NormalizeL2 - Plugins support normalize over "channel" dimension
+# or "channel + all spatial" dimensions for 2D, 3D or 4D cases
+# Issue: 35627, 59791
+normalize_l2_1D_axes_empty_add
+normalize_l2_1D_axes_empty_max
+normalize_l2_1D_axes_0_add
+normalize_l2_1D_axes_0_max
+normalize_l2_2D_axes_0_add
+normalize_l2_2D_axes_0_max
+normalize_l2_2D_axes_01_add
+normalize_l2_2D_axes_01_max
+normalize_l2_3D_axes_2_add
+normalize_l2_3D_axes_2_max
+normalize_l2_4D_axes_0_max
+normalize_l2_4D_axes_0_add
+normalize_l2_4D_axes_2_max
+normalize_l2_4D_axes_2_add
+normalize_l2_4D_axes_3_max
+normalize_l2_4D_axes_3_add
+normalize_l2_4D_axes_23_max
+normalize_l2_4D_axes_23_add
+normalize_l2_4D_axes_0123_max
+normalize_l2_4D_axes_0123_add
+normalize_l2_5D_axes_empty_max
+normalize_l2_5D_axes_empty_add
+normalize_l2_5D_axes_1_max
+normalize_l2_5D_axes_1_add
+normalize_l2_5D_axes_2_max
+normalize_l2_5D_axes_2_add
+normalize_l2_5D_axes_3_max
+normalize_l2_5D_axes_3_add
+normalize_l2_5D_axes_4_max
+normalize_l2_5D_axes_4_add
+normalize_l2_5D_axes_34_max
+normalize_l2_5D_axes_34_add
+normalize_l2_5D_axes_234_max
+normalize_l2_5D_axes_234_add
+
 # (Constant W, R inputs are required) Ticket: 49207
 # W, R inputs as Parameter, default clip value
 # Operation has a form that is not supported.