From f744869551475179061efee74bf6522907bfa944 Mon Sep 17 00:00:00 2001 From: Oleg Pipikin Date: Wed, 13 Sep 2023 19:18:56 +0200 Subject: [PATCH] Refactor BroadcastLayerTest and GRUSequenceTest (#19486) * Refactor GRUSequenceTest * Refactor BroadcastLayerTest * Temporary disable GRUSequenceTest --- .../single_layer_tests/broadcast.cpp | 133 +++++++++------ .../single_layer_tests/gru_sequence.cpp | 87 ++++++---- .../include/single_op_tests/broadcast.hpp | 15 ++ .../include/single_op_tests/gru_sequence.hpp | 15 ++ .../single_layer/broadcast.hpp | 2 - .../single_layer/gru_sequence.hpp | 1 + .../single_op/broadcast.hpp | 32 ++++ .../single_op/gru_sequence.hpp | 38 +++++ .../src/single_layer/broadcast.cpp | 2 + .../src/single_layer/gru_sequence.cpp | 2 + .../src/single_op/broadcast.cpp | 71 ++++++++ .../src/single_op/gru_sequence.cpp | 161 ++++++++++++++++++ .../include/common_test_utils/test_enums.hpp | 33 +++- .../common_test_utils/src/test_enums.cpp | 138 +++++++++++++++ 14 files changed, 642 insertions(+), 88 deletions(-) create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/broadcast.hpp create mode 100644 src/tests/functional/plugin/shared/include/single_op_tests/gru_sequence.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/broadcast.hpp create mode 100644 src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gru_sequence.hpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp create mode 100644 src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp create mode 100644 src/tests/test_utils/common_test_utils/src/test_enums.cpp diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp index a9f07f2b032..d1129082dd6 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp @@ -4,25 +4,23 @@ #include -#include "single_layer_tests/broadcast.hpp" +#include "single_op_tests/broadcast.hpp" #include "common_test_utils/test_constants.hpp" -using namespace LayerTestsDefinitions; - namespace { - -const std::vector inputPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::BF16, - InferenceEngine::Precision::I32, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::U8 +using ov::test::BroadcastLayerTest; +const std::vector inputPrecisions = { + ov::element::f32, + ov::element::bf16, + ov::element::i32, + ov::element::i8, + ov::element::u8 }; -const std::vector inputTPrecisions = { - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::I16, - InferenceEngine::Precision::BOOL +const std::vector inputTPrecisions = { + ov::element::f16, + ov::element::i16, + ov::element::boolean }; // NUMPY MODE ////////////////////////////////////////// @@ -37,11 +35,15 @@ std::vector> targetShapesNumpy1D = { {1, 4, 4}, }; +std::vector> input_shapes_1d_static = { + {{ 1 }} +}; + const auto numpyBroadcast1DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesNumpy1D), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + 
::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -51,9 +53,9 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, numpyBro INSTANTIATE_TEST_CASE_P(smoke_PrecTransformation, BroadcastLayerTest, ::testing::Combine( ::testing::Values(targetShapesNumpy1D[0]), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)), ::testing::ValuesIn(inputTPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), BroadcastLayerTest::getTestCaseName); @@ -66,11 +68,15 @@ std::vector> targetShapesNumpy2D = { {2, 2, 3, 6}, }; +const std::vector> input_shapes_2d_static = { + {{ 3, 1 }} +}; + const auto numpyBroadcast2DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesNumpy2D), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{3, 1}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -85,35 +91,46 @@ std::vector> targetShapesNumpy3D = { {2, 1, 1, 4, 4}, }; +const std::vector> input_shapes_3d_static = { + {{ 1, 4, 1 }} +}; + const auto numpyBroadcast3DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesNumpy3D), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1, 4, 1}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast3D, BroadcastLayerTest, numpyBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName); -// NGRAPH EVALUATE -const auto numpyBroadcastNgraphEvaluateParams = ::testing::Combine( +const std::vector> evaluate_shapes_static = { + {{ 1, 2, 1, 4, 1, 6, 1, 8, 1, 10 }} +}; + +// EVALUATE +const auto numpyBroadcastEvaluateParams = ::testing::Combine( ::testing::Values(std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), - ::testing::Values(ngraph::AxisSet{}), //not used in numpy mode - ::testing::Values(ngraph::op::BroadcastType::NUMPY), - ::testing::Values(std::vector{1, 2, 1, 4, 1, 6, 1, 8, 1, 10}), + ::testing::Values(ov::AxisSet{}), //not used in numpy mode + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(evaluate_shapes_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); -INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcastNgraphEvaluate, BroadcastLayerTest, numpyBroadcastNgraphEvaluateParams, BroadcastLayerTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcastEvaluate, + BroadcastLayerTest, + 
numpyBroadcastEvaluateParams, + BroadcastLayerTest::getTestCaseName); // END NUMPY MODE ////////////////////////////////////// // BIDIRECTIONAL MODE ////////////////////////////////// -std::vector> inShapesBidi = { - {4, 1}, - {1, 4, 1}, - {4, 1, 1} +std::vector> shapes_bidi_static = { + {{4, 1}}, + {{1, 4, 1}}, + {{4, 1, 1}} }; std::vector> targetShapesBidi = { @@ -124,9 +141,9 @@ std::vector> targetShapesBidi = { const auto bidirectionalBroadcastParams = ::testing::Combine( ::testing::ValuesIn(targetShapesBidi), - ::testing::Values(ngraph::AxisSet{}), //not used in bidirectional mode - ::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL), - ::testing::ValuesIn(inShapesBidi), + ::testing::Values(ov::AxisSet{}), //not used in bidirectional mode + ::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_bidi_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -135,15 +152,18 @@ INSTANTIATE_TEST_CASE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, bi // EXPLICIT MODE /////////////////////////////////////// // 1D -std::vector> inShapesExplicit1D = { {4} }; + +std::vector> input_shapes_explicit_1d_static = { + {{ 4 }} +}; std::vector> targetShapesExplicit1D = { {4, 2, 4}, {4, 2, 4, 1} }; -std::vector axes1D = { {0}, {2} }; +std::vector axes1D = { {0}, {2} }; const auto explicitBroadcast1DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesExplicit1D), ::testing::ValuesIn(axes1D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit1D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_1d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -152,9 +172,9 @@ INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, expli const auto bidirectionalBroadcastParams3 = ::testing::Combine( ::testing::Values(targetShapesBidi[2]), - ::testing::Values(ngraph::AxisSet{}), //not used in bidirectional mode - ::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL), - ::testing::Values(inShapesBidi[2]), + ::testing::Values(ov::AxisSet{}), //not used in bidirectional mode + ::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL), + ::testing::Values(ov::test::static_shapes_to_test_representation(shapes_bidi_static[2])), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -179,15 +199,18 @@ std::vector> targetShapesExplicit = { }; // 2D -std::vector> inShapesExplicit2D = { {2, 4} }; +std::vector> input_shapes_explicit_2d_static = { + {{ 2, 4 }} +}; + std::vector> targetShapesExplicit2D = { {2, 2, 4}, {2, 2, 4, 1}}; -std::vector axes2D = { {1, 2}, {0, 2} }; +std::vector axes2D = { {1, 2}, {0, 2} }; const auto explicitBroadcast2DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesExplicit2D), ::testing::ValuesIn(axes2D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit2D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_2d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); @@ -195,15 +218,17 @@ const auto explicitBroadcast2DInputParams = ::testing::Combine( INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast2D, 
BroadcastLayerTest, explicitBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName); // 3D -std::vector> inShapesExplicit3D = { {2, 2, 2} }; +std::vector> input_shapes_explicit_3d_static = { + {{ 2, 2, 2 }} +}; std::vector> targetShapesExplicit3D = { {2, 2, 2, 2} }; -std::vector axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} }; +std::vector axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} }; const auto explicitBroadcast3DInputParams = ::testing::Combine( ::testing::ValuesIn(targetShapesExplicit3D), ::testing::ValuesIn(axes3D), - ::testing::Values(ngraph::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inShapesExplicit3D), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_3d_static)), ::testing::ValuesIn(inputPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU) ); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 20622db576a..36a35d7bffd 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -4,19 +4,40 @@ #include #include -#include "single_layer_tests/gru_sequence.hpp" +#include "single_op_tests/gru_sequence.hpp" #include "common_test_utils/test_constants.hpp" +#include "common_test_utils/test_enums.hpp" -using namespace LayerTestsDefinitions; +using ov::test::GRUSequenceTest; +using ov::test::utils::InputLayerType; +using ov::test::utils::SequenceTestsMode; namespace { - std::vector mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, - ngraph::helpers::SequenceTestsMode::PURE_SEQ}; + std::vector mode{SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST, + SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST, + SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, + SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM, + SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST, + SequenceTestsMode::PURE_SEQ}; // output values increase rapidly without clip, so use only seq_lengths = 2 + + const std::vector> input_shapes_zero_clip_static = { + // {batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, + {{ 10, 2, 1}, { 10, 1, 1 }, { 10 }}, + {{ 10, 2, 1}, { 10, 1, 10 }, { 10 }}, + }; + const std::vector> input_shapes_bidirect_zero_clip_static = { + {{ 10, 2, 1}, { 10, 2, 1 }, { 10 }}, + {{ 10, 2, 1}, { 10, 2, 10 }, { 10 }}, + }; + const std::vector> input_shapes_non_zero_clip_static = { + {{ 10, 20, 1}, { 10, 1, 1 }, { 10 }}, + {{ 10, 20, 1}, { 10, 1, 10 }, { 10 }}, + }; + const std::vector> input_shapes_bidirect_non_zero_clip_static = { + {{ 10, 20, 1}, { 10, 2, 1 }, { 10 }}, + {{ 10, 20, 1}, { 10, 2, 10 }, { 10 }}, + }; std::vector seq_lengths_zero_clip{2}; std::vector seq_lengths_clip_non_zero{20}; std::vector batch{10}; @@ -27,41 +48,37 @@ namespace { std::vector linear_before_reset = {true, false}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; - std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - 
ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL - }; - std::vector netPrecisions = {InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16}; + std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE}; + std::vector direction_bi = {ov::op::RecurrentSequenceDirection::BIDIRECTIONAL}; + + std::vector netPrecisions = {ov::element::f32, + ov::element::f16}; INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClip, GRUSequenceTest, ::testing::Combine( ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_zero_clip_static)), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip), ::testing::ValuesIn(linear_before_reset), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), GRUSequenceTest::getTestCaseName); - INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClipNonConstantWRB, GRUSequenceTest, + INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClipBidirect, GRUSequenceTest, ::testing::Combine( - ::testing::Values(ngraph::helpers::SequenceTestsMode::PURE_SEQ), - ::testing::ValuesIn(seq_lengths_zero_clip), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bidirect_zero_clip_static)), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip), ::testing::ValuesIn(linear_before_reset), - ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::ValuesIn(direction_bi), + ::testing::Values(InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), GRUSequenceTest::getTestCaseName); @@ -69,15 +86,27 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonClip, GRUSequenceTest, ::testing::Combine( ::testing::ValuesIn(mode), - ::testing::ValuesIn(seq_lengths_clip_non_zero), - ::testing::ValuesIn(batch), - ::testing::ValuesIn(hidden_size), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_non_zero_clip_static)), // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args ::testing::ValuesIn(activations), ::testing::ValuesIn(clip_non_zeros), ::testing::ValuesIn(linear_before_reset), ::testing::ValuesIn(direction), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(InputLayerType::CONSTANT), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GRUSequenceTest::getTestCaseName); + + INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonClipBidirect, GRUSequenceTest, + ::testing::Combine( + ::testing::ValuesIn(mode), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bidirect_non_zero_clip_static)), + // ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args + ::testing::ValuesIn(activations), + 
::testing::ValuesIn(clip_non_zeros), + ::testing::ValuesIn(linear_before_reset), + ::testing::ValuesIn(direction_bi), + ::testing::Values(InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), GRUSequenceTest::getTestCaseName); diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/broadcast.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/broadcast.hpp new file mode 100644 index 00000000000..9d5366ec377 --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/broadcast.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/broadcast.hpp" + +namespace ov { +namespace test { +TEST_P(BroadcastLayerTest, CompareWithRefs) { + run(); +} +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/gru_sequence.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/gru_sequence.hpp new file mode 100644 index 00000000000..a9b378df31e --- /dev/null +++ b/src/tests/functional/plugin/shared/include/single_op_tests/gru_sequence.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_op/gru_sequence.hpp" + +namespace ov { +namespace test { +TEST_P(GRUSequenceTest, DISABLED_Inference) { + run(); +}; +} // namespace test +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp index 181b8307c0a..9b0e293a8d5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp @@ -10,8 +10,6 @@ #include #include "shared_test_classes/base/layer_test_utils.hpp" -#include "ngraph_functions/builders.hpp" -#include "ngraph_functions/utils/ngraph_helpers.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp index 4ddcb48894e..9c0df506243 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp @@ -12,6 +12,7 @@ #include "shared_test_classes/base/layer_test_utils.hpp" #include "ngraph_functions/builders.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "common_test_utils/test_enums.hpp" namespace LayerTestsDefinitions { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/broadcast.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/broadcast.hpp new file mode 100644 index 00000000000..ac4e65222f3 --- /dev/null +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/broadcast.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/ov_subgraph.hpp" + +namespace ov { +namespace test { +using BroadcastParamsTuple 
= typename std::tuple<
+        std::vector<size_t>,       // target shape
+        ov::AxisSet,               // axes mapping
+        ov::op::BroadcastType,     // broadcast mode
+        std::vector<InputShape>,   // Input shape
+        ov::element::Type,         // Model type
+        std::string>;              // Device name
+
+class BroadcastLayerTest : public testing::WithParamInterface<BroadcastParamsTuple>,
+                           virtual public ov::test::SubgraphBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple> &obj);
+
+protected:
+    void SetUp() override;
+};
+} // namespace test
+} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gru_sequence.hpp
new file mode 100644
index 00000000000..0b2b053cd48
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gru_sequence.hpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <tuple>
+#include <vector>
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "common_test_utils/test_enums.hpp"
+
+namespace ov {
+namespace test {
+using GRUSequenceParams = typename std::tuple<
+        ov::test::utils::SequenceTestsMode,       // pure Sequence or TensorIterator
+        std::vector<InputShape>,                  // shapes
+        std::vector<std::string>,                 // activations
+        float,                                    // clip
+        bool,                                     // linear_before_reset
+        ov::op::RecurrentSequenceDirection,       // direction
+        ov::test::utils::InputLayerType,          // WRB input type (Constant or Parameter)
+        ov::element::Type,                        // Network precision
+        std::string>;                             // Device name
+
+class GRUSequenceTest : public testing::WithParamInterface<GRUSequenceParams>,
+                        virtual public ov::test::SubgraphBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<GRUSequenceParams> &obj);
+
+protected:
+    void SetUp() override;
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;
+    ov::test::utils::SequenceTestsMode m_mode;
+    int64_t m_max_seq_len = 0;
+};
+} // namespace test
+} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp
index 34e5aeb6091..36d41304aca 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp
@@ -4,6 +4,8 @@
 
 #include "shared_test_classes/single_layer/broadcast.hpp"
 
+#include "ngraph_functions/builders.hpp"
+
 namespace LayerTestsDefinitions {
 std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple>& obj) {
     InferenceEngine::SizeVector targetShape;
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
index c02665ff558..a741b6263fd 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
@@ -5,6 +5,8 @@
 #include "shared_test_classes/single_layer/gru_sequence.hpp"
 #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
 #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
 
 namespace LayerTestsDefinitions {
 
diff --git a/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp
new file mode
100644
index 00000000000..084eaf1fd0b
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp
@@ -0,0 +1,71 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_op/broadcast.hpp"
+
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/result.hpp"
+
+namespace ov {
+namespace test {
+std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple>& obj) {
+    ov::Shape target_shape;
+    ov::AxisSet axes_mapping;
+    ov::op::BroadcastType mode;
+    std::vector<InputShape> shapes;
+    ov::element::Type type;
+    std::string device_name;
+    std::tie(target_shape, axes_mapping, mode, shapes, type, device_name) = obj.param;
+
+    std::ostringstream result;
+    result << "targetShape=" << ov::test::utils::vec2str(target_shape) << "_";
+    result << "axesMapping=" << ov::test::utils::set2str(axes_mapping) << "_";
+    result << "mode=" << mode << "_";
+    result << "IS=(";
+    for (const auto& shape : shapes) {
+        result << ov::test::utils::partialShape2str({shape.first}) << "_";
+    }
+    result << ")_TS=(";
+    for (const auto& shape : shapes) {
+        for (const auto& item : shape.second) {
+            result << ov::test::utils::vec2str(item) << "_";
+        }
+    }
+    result << "IT=" << type.get_type_name() << "_";
+    result << "trgDev=" << device_name;
+    return result.str();
+}
+
+void BroadcastLayerTest::SetUp() {
+    std::vector<size_t> target_shape;
+    ov::AxisSet axes_mapping;
+    ov::op::BroadcastType mode;
+    std::vector<InputShape> shapes;
+    ov::element::Type model_type;
+    std::tie(target_shape, axes_mapping, mode, shapes, model_type, targetDevice) = this->GetParam();
+    init_input_shapes(shapes);
+
+    auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {target_shape.size()}, target_shape);
+    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front())};
+
+    std::shared_ptr<ov::Node> broadcast;
+    if (mode == ngraph::op::BroadcastType::NONE) {
+        auto axis_set_const = ov::op::v0::Constant::create(ngraph::element::i64, {axes_mapping.size()}, axes_mapping.to_vector());
+        broadcast = std::make_shared<ov::op::v3::Broadcast>(params[0],
+                                                            target_shape_const,
+                                                            axis_set_const,
+                                                            mode);
+    } else { // numpy/bidirectional modes
+        broadcast = std::make_shared<ov::op::v3::Broadcast>(params[0],
+                                                            target_shape_const,
+                                                            mode);
+    }
+
+    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(broadcast)};
+    function = std::make_shared<ov::Model>(results, params, "BroadcastInference");
+}
+} // namespace test
+} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp
new file mode 100644
index 00000000000..54e7140830d
--- /dev/null
+++ b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp
@@ -0,0 +1,161 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_op/gru_sequence.hpp"
+
+#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
+#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp"
+#include "openvino/pass/manager.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace ov {
+namespace test {
+using ov::test::utils::InputLayerType;
+using ov::test::utils::SequenceTestsMode;
+using ngraph::helpers::is_tensor_iterator_exist;
+
+std::string GRUSequenceTest::getTestCaseName(const
testing::TestParamInfo<GRUSequenceParams> &obj) {
+    std::vector<InputShape> shapes;
+    SequenceTestsMode mode;
+    std::vector<std::string> activations;
+    std::vector<float> activations_alpha;
+    std::vector<float> activations_beta;
+    float clip;
+    bool linear_before_reset;
+    ov::op::RecurrentSequenceDirection direction;
+    InputLayerType WRBType;
+    ov::element::Type type;
+    std::string targetDevice;
+    std::tie(mode, shapes, activations, clip, linear_before_reset, direction, WRBType,
+             type, targetDevice) = obj.param;
+    std::ostringstream result;
+    result << "mode=" << mode << "_";
+    result << "IS=(";
+    for (const auto& shape : shapes) {
+        result << ov::test::utils::partialShape2str({shape.first}) << "_";
+    }
+    result << ")_TS=(";
+    for (const auto& shape : shapes) {
+        for (const auto& item : shape.second) {
+            result << ov::test::utils::vec2str(item) << "_";
+        }
+    }
+    result << "linear_before_reset=" << linear_before_reset << "_";
+    result << "activations=" << ov::test::utils::vec2str(activations) << "_";
+    result << "direction=" << direction << "_";
+    result << "WRBType=" << WRBType << "_";
+    result << "clip=" << clip << "_";
+    result << "IT=" << type.get_type_name() << "_";
+    result << "targetDevice=" << targetDevice << "_";
+    return result.str();
+}
+
+void GRUSequenceTest::SetUp() {
+    std::vector<InputShape> shapes;
+    std::vector<std::string> activations;
+    std::vector<float> activations_alpha;
+    std::vector<float> activations_beta;
+    float clip;
+    bool linear_before_reset;
+    ov::op::RecurrentSequenceDirection direction;
+    InputLayerType wbr_type;
+    std::tie(m_mode, shapes, activations, clip, linear_before_reset, direction, wbr_type,
+             inType, targetDevice) = this->GetParam();
+    outType = inType;
+    init_input_shapes(shapes);
+    if (inType == ElementType::bf16 || inType == ElementType::f16) {
+        rel_threshold = 1e-2;
+    }
+
+    const size_t seq_lengths = targetStaticShapes.front()[0][1];
+    const size_t hidden_size = targetStaticShapes.front()[1][2];
+    const size_t input_size = targetStaticShapes.front()[0][2];
+    const size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
+    const size_t batch = inputDynamicShapes[0][0].is_static() ? inputDynamicShapes[0][0].get_length() :
+                         inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length() :
+                         inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static() ? inputDynamicShapes[2][0].get_length() :
+                         1lu;
+
+    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes[0]),
+                               std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes[1])};
+
+    const auto& w_shape = ov::Shape{num_directions, 3 * hidden_size, input_size};
+    const auto& r_shape = ov::Shape{num_directions, 3 * hidden_size, hidden_size};
+    const auto& b_shape = ov::Shape{num_directions, (linear_before_reset ?
4 : 3) * hidden_size};
+
+    std::shared_ptr<ov::Node> seq_lengths_node;
+    if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM ||
+        m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM ||
+        m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) {
+        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[2]);
+        param->set_friendly_name("seq_lengths");
+        params.push_back(param);
+        seq_lengths_node = param;
+    } else if (m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST ||
+               m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) {
+        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, targetStaticShapes[0][2], seq_lengths, 0);
+        seq_lengths_node = std::make_shared<ov::op::v0::Constant>(tensor);
+    } else {
+        std::vector<int64_t> lengths(batch, seq_lengths);
+        seq_lengths_node = std::make_shared<ov::op::v0::Constant>(ov::element::i64, targetStaticShapes[0][2], lengths);
+    }
+
+    std::shared_ptr<ov::Node> w, r, b;
+    if (wbr_type == InputLayerType::PARAMETER) {
+        const auto w_param = std::make_shared<ov::op::v0::Parameter>(inType, w_shape);
+        const auto r_param = std::make_shared<ov::op::v0::Parameter>(inType, r_shape);
+        const auto b_param = std::make_shared<ov::op::v0::Parameter>(inType, b_shape);
+        w = w_param;
+        r = r_param;
+        b = b_param;
+        params.push_back(w_param);
+        params.push_back(r_param);
+        params.push_back(b_param);
+    } else {
+        auto tensor_w = ov::test::utils::create_and_fill_tensor(inType, w_shape);
+        w = std::make_shared<ov::op::v0::Constant>(tensor_w);
+
+        auto tensor_R = ov::test::utils::create_and_fill_tensor(inType, r_shape);
+        r = std::make_shared<ov::op::v0::Constant>(tensor_R);
+
+        auto tensor_B = ov::test::utils::create_and_fill_tensor(inType, b_shape);
+        b = std::make_shared<ov::op::v0::Constant>(tensor_B);
+    }
+
+    auto gru_sequence = std::make_shared<ov::op::v5::GRUSequence>(params[0], params[1], seq_lengths_node, w, r, b, hidden_size, direction,
+                                                                  activations, activations_alpha, activations_beta, clip, linear_before_reset);
+    ov::OutputVector results{std::make_shared<ov::op::v0::Result>(gru_sequence->output(0)),
+                             std::make_shared<ov::op::v0::Result>(gru_sequence->output(1))};
+    function = std::make_shared<ov::Model>(results, params, "gru_sequence");
+
+    bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ ||
+                             m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM ||
+                             m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST);
+    if (!is_pure_sequence) {
+        ov::pass::Manager manager;
+        if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL)
+            manager.register_pass<ov::pass::BidirectionalGRUSequenceDecomposition>();
+        manager.register_pass<ov::pass::ConvertGRUSequenceToTensorIterator>();
+        manager.run_passes(function);
+        bool ti_found = is_tensor_iterator_exist(function);
+        EXPECT_EQ(ti_found, true);
+    } else {
+        bool ti_found = is_tensor_iterator_exist(function);
+        EXPECT_EQ(ti_found, false);
+    }
+}
+
+void GRUSequenceTest::generate_inputs(const std::vector<ov::Shape>& target_input_static_shapes) {
+    inputs.clear();
+    auto params = function->get_parameters();
+    OPENVINO_ASSERT(target_input_static_shapes.size() >= params.size());
+    for (int i = 0; i < params.size(); i++) {
+        auto tensor = ov::test::utils::create_and_fill_tensor(params[i]->get_element_type(), target_input_static_shapes[i], m_max_seq_len, 0);
+        inputs.insert({params[i], tensor});
+    }
+}
+} // namespace test
+} // namespace ov
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_enums.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_enums.hpp
index ef538e13723..f662b5a868c 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_enums.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_enums.hpp
@@ -4,15 +4,42 @@
 
 #pragma once
 
+#include <ostream>
+
 namespace ov {
 namespace test {
 namespace utils {
-enum
ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL }; +enum class ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL }; -enum ConversionTypes { CONVERT, CONVERT_LIKE }; +enum class ConversionTypes { CONVERT, CONVERT_LIKE }; -enum ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 }; +enum class ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 }; + +enum class InputLayerType { + CONSTANT, + PARAMETER, +}; + +enum class SequenceTestsMode { + PURE_SEQ, + PURE_SEQ_RAND_SEQ_LEN_CONST, + PURE_SEQ_RAND_SEQ_LEN_PARAM, + CONVERT_TO_TI_MAX_SEQ_LEN_CONST, + CONVERT_TO_TI_MAX_SEQ_LEN_PARAM, + CONVERT_TO_TI_RAND_SEQ_LEN_CONST, + CONVERT_TO_TI_RAND_SEQ_LEN_PARAM, +}; + +std::ostream& operator<<(std::ostream& os, const ComparisonTypes type); + +std::ostream& operator<<(std::ostream& os, const ConversionTypes type); + +std::ostream& operator<<(std::ostream& os, const ReductionType type); + +std::ostream& operator<<(std::ostream& os, const InputLayerType type); + +std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type); } // namespace utils } // namespace test diff --git a/src/tests/test_utils/common_test_utils/src/test_enums.cpp b/src/tests/test_utils/common_test_utils/src/test_enums.cpp new file mode 100644 index 00000000000..548d8ab34f2 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/test_enums.cpp @@ -0,0 +1,138 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/test_enums.hpp" + +namespace ov { +namespace test { +namespace utils { + +std::ostream& operator<<(std::ostream& os, const ComparisonTypes type) { + switch (type) { + case ComparisonTypes::EQUAL: + os << "Equal"; + break; + case ComparisonTypes::NOT_EQUAL: + os << "NotEqual"; + break; + case ComparisonTypes::GREATER: + os << "Greater"; + break; + case ComparisonTypes::GREATER_EQUAL: + os << "GreaterEqual"; + break; + case ComparisonTypes::IS_FINITE: + os << "IsFinite"; + break; + case ComparisonTypes::IS_INF: + os << "IsInf"; + break; + case ComparisonTypes::IS_NAN: + os << "IsNaN"; + break; + case ComparisonTypes::LESS: + os << "Less"; + break; + case ComparisonTypes::LESS_EQUAL: + os << "LessEqual"; + break; + default: + throw std::runtime_error("NOT_SUPPORTED_COMPARISON_TYPE"); + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const ConversionTypes type) { + switch (type) { + case ConversionTypes::CONVERT: + os << "Convert"; + break; + case ConversionTypes::CONVERT_LIKE: + os << "ConvertLike"; + break; + default: + throw std::runtime_error("NOT_SUPPORTED_CONVERSION_TYPE"); + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const ReductionType type) { + switch (type) { + case ReductionType::Mean: + os << "Mean"; + break; + case ReductionType::Max: + os << "Max"; + break; + case ReductionType::Min: + os << "Min"; + break; + case ReductionType::Prod: + os << "Prod"; + break; + case ReductionType::Sum: + os << "Sum"; + break; + case ReductionType::LogicalOr: + os << "LogicalOr"; + break; + case ReductionType::LogicalAnd: + os << "LogicalAnd"; + break; + case ReductionType::L1: + os << "ReduceL1"; + break; + case ReductionType::L2: + os << "ReduceL2"; + break; + default: + throw std::runtime_error("NOT_SUPPORTED_REDUCTION_TYPE"); + } + return os; +} +std::ostream& operator<<(std::ostream& os, const InputLayerType type) { + switch (type) { + case 
InputLayerType::CONSTANT: + os << "CONSTANT"; + break; + case InputLayerType::PARAMETER: + os << "PARAMETER"; + break; + default: + throw std::runtime_error("NOT_SUPPORTED_INPUT_LAYER_TYPE"); + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type) { + switch (type) { + case SequenceTestsMode::PURE_SEQ: + os << "PURE_SEQ"; + break; + case SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: + os << "PURE_SEQ_RAND_SEQ_LEN_CONST"; + break; + case SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM: + os << "PURE_SEQ_RAND_SEQ_LEN_PARAM"; + break; + case SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM: + os << "CONVERT_TO_TI_RAND_SEQ_LEN_PARAM"; + break; + case SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST: + os << "CONVERT_TO_TI_RAND_SEQ_LEN_CONST"; + break; + case SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM: + os << "CONVERT_TO_TI_MAX_SEQ_LEN_PARAM"; + break; + case SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST: + os << "CONVERT_TO_TI_MAX_SEQ_LEN_CONST"; + break; + default: + throw std::runtime_error("NOT_SUPPORTED_OP_TYPE"); + } + return os; +} +} // namespace utils +} // namespace test +} // namespace ov
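Usage sketch (illustrative, not taken from the patch): after this refactoring a plugin instantiates BroadcastLayerTest the same way the CPU instances above do — static source shapes are wrapped with ov::test::static_shapes_to_test_representation() and an ov::element::Type replaces the old InferenceEngine::Precision, while the rest of the parameter order is unchanged. A minimal example along those lines; the suite name and concrete shapes are hypothetical, all other symbols come from the files in this patch:

#include <vector>

#include "single_op_tests/broadcast.hpp"
#include "common_test_utils/test_constants.hpp"

namespace {
using ov::test::BroadcastLayerTest;

// Plain static shapes; static_shapes_to_test_representation() converts them into the
// InputShape objects expected by the refactored test class.
const std::vector<std::vector<ov::Shape>> input_shapes_static = {
    {{3, 1}}
};

INSTANTIATE_TEST_SUITE_P(smoke_ExampleNumpyBroadcast, BroadcastLayerTest,
                         ::testing::Combine(
                             ::testing::Values(std::vector<size_t>{2, 3, 6}),     // target shape
                             ::testing::Values(ov::AxisSet{}),                    // not used in NUMPY mode
                             ::testing::Values(ov::op::BroadcastType::NUMPY),     // broadcast mode
                             ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)),
                             ::testing::Values(ov::element::f32),                 // model type
                             ::testing::Values(ov::test::utils::DEVICE_CPU)),     // target device
                         BroadcastLayerTest::getTestCaseName);
}  // namespace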