Refactor BroadcastLayerTest and GRUSequenceTest (#19486)

* Refactor GRUSequenceTest

* Refactor BroadcastLayerTest

* Temporarily disable GRUSequenceTest
This commit is contained in:
Oleg Pipikin 2023-09-13 19:18:56 +02:00 committed by GitHub
parent 0234357869
commit f744869551
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 642 additions and 88 deletions

View File

@ -4,25 +4,23 @@
#include <vector>
#include "single_layer_tests/broadcast.hpp"
#include "single_op_tests/broadcast.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::BF16,
InferenceEngine::Precision::I32,
InferenceEngine::Precision::I8,
InferenceEngine::Precision::U8
using ov::test::BroadcastLayerTest;
const std::vector<ov::element::Type> inputPrecisions = {
ov::element::f32,
ov::element::bf16,
ov::element::i32,
ov::element::i8,
ov::element::u8
};
const std::vector<InferenceEngine::Precision> inputTPrecisions = {
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::I16,
InferenceEngine::Precision::BOOL
const std::vector<ov::element::Type> inputTPrecisions = {
ov::element::f16,
ov::element::i16,
ov::element::boolean
};
// NUMPY MODE //////////////////////////////////////////
@ -37,11 +35,15 @@ std::vector<std::vector<size_t>> targetShapesNumpy1D = {
{1, 4, 4},
};
std::vector<std::vector<ov::Shape>> input_shapes_1d_static = {
{{ 1 }}
};
const auto numpyBroadcast1DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesNumpy1D),
::testing::Values(ngraph::AxisSet{}), //not used in numpy mode
::testing::Values(ngraph::op::BroadcastType::NUMPY),
::testing::Values(std::vector<size_t>{1}),
::testing::Values(ov::AxisSet{}), //not used in numpy mode
::testing::Values(ov::op::BroadcastType::NUMPY),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -51,9 +53,9 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast1D, BroadcastLayerTest, numpyBro
INSTANTIATE_TEST_CASE_P(smoke_PrecTransformation, BroadcastLayerTest,
::testing::Combine(
::testing::Values(targetShapesNumpy1D[0]),
::testing::Values(ngraph::AxisSet{}), //not used in numpy mode
::testing::Values(ngraph::op::BroadcastType::NUMPY),
::testing::Values(std::vector<size_t>{1}),
::testing::Values(ov::AxisSet{}), //not used in numpy mode
::testing::Values(ov::op::BroadcastType::NUMPY),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)),
::testing::ValuesIn(inputTPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)),
BroadcastLayerTest::getTestCaseName);
@ -66,11 +68,15 @@ std::vector<std::vector<size_t>> targetShapesNumpy2D = {
{2, 2, 3, 6},
};
const std::vector<std::vector<ov::Shape>> input_shapes_2d_static = {
{{ 3, 1 }}
};
const auto numpyBroadcast2DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesNumpy2D),
::testing::Values(ngraph::AxisSet{}), //not used in numpy mode
::testing::Values(ngraph::op::BroadcastType::NUMPY),
::testing::Values(std::vector<size_t>{3, 1}),
::testing::Values(ov::AxisSet{}), //not used in numpy mode
::testing::Values(ov::op::BroadcastType::NUMPY),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -85,35 +91,46 @@ std::vector<std::vector<size_t>> targetShapesNumpy3D = {
{2, 1, 1, 4, 4},
};
const std::vector<std::vector<ov::Shape>> input_shapes_3d_static = {
{{ 1, 4, 1 }}
};
const auto numpyBroadcast3DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesNumpy3D),
::testing::Values(ngraph::AxisSet{}), //not used in numpy mode
::testing::Values(ngraph::op::BroadcastType::NUMPY),
::testing::Values(std::vector<size_t>{1, 4, 1}),
::testing::Values(ov::AxisSet{}), //not used in numpy mode
::testing::Values(ov::op::BroadcastType::NUMPY),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast3D, BroadcastLayerTest, numpyBroadcast3DInputParams, BroadcastLayerTest::getTestCaseName);
// NGRAPH EVALUATE
const auto numpyBroadcastNgraphEvaluateParams = ::testing::Combine(
const std::vector<std::vector<ov::Shape>> evaluate_shapes_static = {
{{ 1, 2, 1, 4, 1, 6, 1, 8, 1, 10 }}
};
// EVALUATE
const auto numpyBroadcastEvaluateParams = ::testing::Combine(
::testing::Values(std::vector<size_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}),
::testing::Values(ngraph::AxisSet{}), //not used in numpy mode
::testing::Values(ngraph::op::BroadcastType::NUMPY),
::testing::Values(std::vector<size_t>{1, 2, 1, 4, 1, 6, 1, 8, 1, 10}),
::testing::Values(ov::AxisSet{}), //not used in numpy mode
::testing::Values(ov::op::BroadcastType::NUMPY),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(evaluate_shapes_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcastNgraphEvaluate, BroadcastLayerTest, numpyBroadcastNgraphEvaluateParams, BroadcastLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcastEvaluate,
BroadcastLayerTest,
numpyBroadcastEvaluateParams,
BroadcastLayerTest::getTestCaseName);
// END NUMPY MODE //////////////////////////////////////
// BIDIRECTIONAL MODE //////////////////////////////////
std::vector<std::vector<size_t>> inShapesBidi = {
{4, 1},
{1, 4, 1},
{4, 1, 1}
std::vector<std::vector<ov::Shape>> shapes_bidi_static = {
{{4, 1}},
{{1, 4, 1}},
{{4, 1, 1}}
};
std::vector<std::vector<size_t>> targetShapesBidi = {
@ -124,9 +141,9 @@ std::vector<std::vector<size_t>> targetShapesBidi = {
const auto bidirectionalBroadcastParams = ::testing::Combine(
::testing::ValuesIn(targetShapesBidi),
::testing::Values(ngraph::AxisSet{}), //not used in bidirectional mode
::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL),
::testing::ValuesIn(inShapesBidi),
::testing::Values(ov::AxisSet{}), //not used in bidirectional mode
::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes_bidi_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -135,15 +152,18 @@ INSTANTIATE_TEST_CASE_P(smoke_TestBidirectionalBroadcast, BroadcastLayerTest, bi
// EXPLICIT MODE ///////////////////////////////////////
// 1D
std::vector<std::vector<size_t>> inShapesExplicit1D = { {4} };
std::vector<std::vector<ov::Shape>> input_shapes_explicit_1d_static = {
{{ 4 }}
};
std::vector<std::vector<size_t>> targetShapesExplicit1D = { {4, 2, 4}, {4, 2, 4, 1} };
std::vector<ngraph::AxisSet> axes1D = { {0}, {2} };
std::vector<ov::AxisSet> axes1D = { {0}, {2} };
const auto explicitBroadcast1DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesExplicit1D),
::testing::ValuesIn(axes1D),
::testing::Values(ngraph::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(inShapesExplicit1D),
::testing::Values(ov::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_1d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -152,9 +172,9 @@ INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast1D, BroadcastLayerTest, expli
const auto bidirectionalBroadcastParams3 = ::testing::Combine(
::testing::Values(targetShapesBidi[2]),
::testing::Values(ngraph::AxisSet{}), //not used in bidirectional mode
::testing::Values(ngraph::op::BroadcastType::BIDIRECTIONAL),
::testing::Values(inShapesBidi[2]),
::testing::Values(ov::AxisSet{}), //not used in bidirectional mode
::testing::Values(ov::op::BroadcastType::BIDIRECTIONAL),
::testing::Values(ov::test::static_shapes_to_test_representation(shapes_bidi_static[2])),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -179,15 +199,18 @@ std::vector<std::vector<size_t>> targetShapesExplicit = {
};
// 2D
std::vector<std::vector<size_t>> inShapesExplicit2D = { {2, 4} };
std::vector<std::vector<ov::Shape>> input_shapes_explicit_2d_static = {
{{ 2, 4 }}
};
std::vector<std::vector<size_t>> targetShapesExplicit2D = { {2, 2, 4}, {2, 2, 4, 1}};
std::vector<ngraph::AxisSet> axes2D = { {1, 2}, {0, 2} };
std::vector<ov::AxisSet> axes2D = { {1, 2}, {0, 2} };
const auto explicitBroadcast2DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesExplicit2D),
::testing::ValuesIn(axes2D),
::testing::Values(ngraph::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(inShapesExplicit2D),
::testing::Values(ov::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_2d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
@ -195,15 +218,17 @@ const auto explicitBroadcast2DInputParams = ::testing::Combine(
INSTANTIATE_TEST_CASE_P(smoke_TestExplicitBroadcast2D, BroadcastLayerTest, explicitBroadcast2DInputParams, BroadcastLayerTest::getTestCaseName);
// 3D
std::vector<std::vector<size_t>> inShapesExplicit3D = { {2, 2, 2} };
std::vector<std::vector<ov::Shape>> input_shapes_explicit_3d_static = {
{{ 2, 2, 2 }}
};
std::vector<std::vector<size_t>> targetShapesExplicit3D = { {2, 2, 2, 2} };
std::vector<ngraph::AxisSet> axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} };
std::vector<ov::AxisSet> axes3D = { {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3} };
const auto explicitBroadcast3DInputParams = ::testing::Combine(
::testing::ValuesIn(targetShapesExplicit3D),
::testing::ValuesIn(axes3D),
::testing::Values(ngraph::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(inShapesExplicit3D),
::testing::Values(ov::op::BroadcastType::EXPLICIT),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_explicit_3d_static)),
::testing::ValuesIn(inputPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)
);

View File

@ -4,19 +4,40 @@
#include <vector>
#include <ngraph/op/util/attr_types.hpp>
#include "single_layer_tests/gru_sequence.hpp"
#include "single_op_tests/gru_sequence.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/test_enums.hpp"
using namespace LayerTestsDefinitions;
using ov::test::GRUSequenceTest;
using ov::test::utils::InputLayerType;
using ov::test::utils::SequenceTestsMode;
namespace {
std::vector<ngraph::helpers::SequenceTestsMode> mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM,
ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST,
ngraph::helpers::SequenceTestsMode::PURE_SEQ};
std::vector<SequenceTestsMode> mode{SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM,
SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST,
SequenceTestsMode::PURE_SEQ};
// output values increase rapidly without clip, so use only seq_lengths = 2
const std::vector<std::vector<ov::Shape>> input_shapes_zero_clip_static = {
// {batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch},
{{ 10, 2, 1}, { 10, 1, 1 }, { 10 }},
{{ 10, 2, 1}, { 10, 1, 10 }, { 10 }},
};
const std::vector<std::vector<ov::Shape>> input_shapes_bidirect_zero_clip_static = {
{{ 10, 2, 1}, { 10, 2, 1 }, { 10 }},
{{ 10, 2, 1}, { 10, 2, 10 }, { 10 }},
};
const std::vector<std::vector<ov::Shape>> input_shapes_non_zero_clip_static = {
{{ 10, 20, 1}, { 10, 1, 1 }, { 10 }},
{{ 10, 20, 1}, { 10, 1, 10 }, { 10 }},
};
const std::vector<std::vector<ov::Shape>> input_shapes_bidirect_non_zero_clip_static = {
{{ 10, 20, 1}, { 10, 2, 1 }, { 10 }},
{{ 10, 20, 1}, { 10, 2, 10 }, { 10 }},
};
std::vector<size_t> seq_lengths_zero_clip{2};
std::vector<size_t> seq_lengths_clip_non_zero{20};
std::vector<size_t> batch{10};
@ -27,41 +48,37 @@ namespace {
std::vector<bool> linear_before_reset = {true, false};
std::vector<float> clip{0.f};
std::vector<float> clip_non_zeros{0.7f};
std::vector<ngraph::op::RecurrentSequenceDirection> direction = {ngraph::op::RecurrentSequenceDirection::FORWARD,
ngraph::op::RecurrentSequenceDirection::REVERSE,
ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16};
std::vector<ov::op::RecurrentSequenceDirection> direction = {ov::op::RecurrentSequenceDirection::FORWARD,
ov::op::RecurrentSequenceDirection::REVERSE};
std::vector<ov::op::RecurrentSequenceDirection> direction_bi = {ov::op::RecurrentSequenceDirection::BIDIRECTIONAL};
std::vector<ov::element::Type> netPrecisions = {ov::element::f32,
ov::element::f16};
INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClip, GRUSequenceTest,
::testing::Combine(
::testing::ValuesIn(mode),
::testing::ValuesIn(seq_lengths_zero_clip),
::testing::ValuesIn(batch),
::testing::ValuesIn(hidden_size),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_zero_clip_static)),
// ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args
::testing::ValuesIn(activations),
::testing::ValuesIn(clip),
::testing::ValuesIn(linear_before_reset),
::testing::ValuesIn(direction),
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::Values(InputLayerType::CONSTANT),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)),
GRUSequenceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClipNonConstantWRB, GRUSequenceTest,
INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonZeroClipBidirect, GRUSequenceTest,
::testing::Combine(
::testing::Values(ngraph::helpers::SequenceTestsMode::PURE_SEQ),
::testing::ValuesIn(seq_lengths_zero_clip),
::testing::ValuesIn(batch),
::testing::ValuesIn(hidden_size),
::testing::ValuesIn(mode),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bidirect_zero_clip_static)),
// ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args
::testing::ValuesIn(activations),
::testing::ValuesIn(clip),
::testing::ValuesIn(linear_before_reset),
::testing::ValuesIn(direction),
::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
::testing::ValuesIn(direction_bi),
::testing::Values(InputLayerType::CONSTANT),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)),
GRUSequenceTest::getTestCaseName);
@ -69,15 +86,27 @@ namespace {
INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonClip, GRUSequenceTest,
::testing::Combine(
::testing::ValuesIn(mode),
::testing::ValuesIn(seq_lengths_clip_non_zero),
::testing::ValuesIn(batch),
::testing::ValuesIn(hidden_size),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_non_zero_clip_static)),
// ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args
::testing::ValuesIn(activations),
::testing::ValuesIn(clip_non_zeros),
::testing::ValuesIn(linear_before_reset),
::testing::ValuesIn(direction),
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::Values(InputLayerType::CONSTANT),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)),
GRUSequenceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_GRUSequenceCommonClipBidirect, GRUSequenceTest,
::testing::Combine(
::testing::ValuesIn(mode),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_bidirect_non_zero_clip_static)),
// ::testing::ValuesIn(input_size), // hardcoded to 10 due to Combine supports up to 10 args
::testing::ValuesIn(activations),
::testing::ValuesIn(clip_non_zeros),
::testing::ValuesIn(linear_before_reset),
::testing::ValuesIn(direction_bi),
::testing::Values(InputLayerType::CONSTANT),
::testing::ValuesIn(netPrecisions),
::testing::Values(ov::test::utils::DEVICE_CPU)),
GRUSequenceTest::getTestCaseName);

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_op/broadcast.hpp"
namespace ov {
namespace test {
// Builds the parameterized Broadcast model (see BroadcastLayerTest::SetUp) and
// runs inference, comparing plugin output against the reference implementation.
TEST_P(BroadcastLayerTest, CompareWithRefs) {
    run();
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_op/gru_sequence.hpp"
namespace ov {
namespace test {
// Builds the parameterized GRUSequence model (see GRUSequenceTest::SetUp) and runs
// inference. Prefixed with DISABLED_ to temporarily skip the test (per the commit
// that introduced this refactor).
// Fix: dropped the stray semicolon after the TEST_P body for consistency with the
// other single-op test headers.
TEST_P(GRUSequenceTest, DISABLED_Inference) {
    run();
}
} // namespace test
} // namespace ov

View File

@ -10,8 +10,6 @@
#include <memory>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace LayerTestsDefinitions {

View File

@ -12,6 +12,7 @@
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/test_enums.hpp"
namespace LayerTestsDefinitions {

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace ov {
namespace test {
using BroadcastParamsTuple = typename std::tuple<
std::vector<size_t>, // target shape
ov::AxisSet, // axes mapping
ov::op::BroadcastType, // broadcast mode
std::vector<InputShape>, // Input shape
ov::element::Type, // Model type
std::string>; // Device name
// Parameterized single-op test for Broadcast.
// Parameters (BroadcastParamsTuple): target shape, axes mapping, broadcast mode,
// input shapes, model element type, and target device name.
class BroadcastLayerTest : public testing::WithParamInterface<BroadcastParamsTuple>,
                           virtual public ov::test::SubgraphBaseTest {
public:
    // Produces a human-readable test-case name from the parameter tuple.
    static std::string getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple> &obj);

protected:
    // Builds the Parameter -> Broadcast -> Result model under test.
    void SetUp() override;
};
} // namespace test
} // namespace ov

View File

@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"
namespace ov {
namespace test {
using GRUSequenceParams = typename std::tuple<
ov::test::utils::SequenceTestsMode, // pure Sequence or TensorIterator
std::vector<InputShape>, // shapes
std::vector<std::string>, // activations
float, // clip
bool, // linear_before_reset
ov::op::RecurrentSequenceDirection, // direction
ov::test::utils::InputLayerType, // WRB input type (Constant or Parameter)
ov::element::Type, // Network precision
std::string>; // Device name
// Parameterized single-op test for GRUSequence.
// Parameters (GRUSequenceParams): test mode, input shapes, activations, clip,
// linear_before_reset, direction, WRB input type, model element type, device name.
class GRUSequenceTest : public testing::WithParamInterface<GRUSequenceParams>,
                        virtual public ov::test::SubgraphBaseTest {
public:
    // Produces a human-readable test-case name from the parameter tuple.
    static std::string getTestCaseName(const testing::TestParamInfo<GRUSequenceParams> &obj);

protected:
    void SetUp() override;
    // Fills every model Parameter; uses m_max_seq_len as the fill range so that
    // generated sequence lengths stay within the data's sequence dimension.
    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;

    // Test mode (pure Sequence vs. conversion to TensorIterator), cached by SetUp().
    ov::test::utils::SequenceTestsMode m_mode;
    // Upper bound for generated input values; consumed by generate_inputs().
    int64_t m_max_seq_len = 0;
};
} // namespace test
} // namespace ov

View File

@ -4,6 +4,8 @@
#include "shared_test_classes/single_layer/broadcast.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {
std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple>& obj) {
InferenceEngine::SizeVector targetShape;

View File

@ -5,6 +5,8 @@
#include "shared_test_classes/single_layer/gru_sequence.hpp"
#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_enums.hpp"
namespace LayerTestsDefinitions {

View File

@ -0,0 +1,71 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_op/broadcast.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
namespace ov {
namespace test {
// Produces a human-readable test-case name from the Broadcast parameter tuple.
// The exact output format is relied upon by test filtering, so it is preserved
// verbatim (including the unterminated "TS=(" group).
std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo<BroadcastParamsTuple>& obj) {
    const auto& [target_shape, axes_mapping, mode, shapes, type, device_name] = obj.param;

    std::ostringstream name;
    name << "targetShape=" << ov::test::utils::vec2str(target_shape) << "_";
    name << "axesMapping=" << ov::test::utils::set2str(axes_mapping) << "_";
    name << "mode=" << mode << "_";
    name << "IS=(";
    for (const auto& shape : shapes)
        name << ov::test::utils::partialShape2str({shape.first}) << "_";
    name << ")_TS=(";
    for (const auto& shape : shapes)
        for (const auto& static_shape : shape.second)
            name << ov::test::utils::vec2str(static_shape) << "_";
    name << "IT=" << type.get_type_name() << "_";
    name << "trgDev=" << device_name;
    return name.str();
}
// Builds the Parameter -> Broadcast-v3 -> Result model from the test parameters.
//
// Fix: replaced leftover ngraph:: qualifiers (ngraph::op::BroadcastType,
// ngraph::element) with their ov:: equivalents — ngraph is an alias namespace of
// ov, so the types are identical and behavior is unchanged; this file otherwise
// uses ov:: throughout.
void BroadcastLayerTest::SetUp() {
    std::vector<size_t> target_shape;
    ov::AxisSet axes_mapping;
    ov::op::BroadcastType mode;
    std::vector<InputShape> shapes;
    ov::element::Type model_type;
    std::tie(target_shape, axes_mapping, mode, shapes, model_type, targetDevice) = this->GetParam();
    init_input_shapes(shapes);

    // Target shape is always supplied as an i64 constant (second Broadcast input).
    auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {target_shape.size()}, target_shape);
    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front())};

    std::shared_ptr<ov::Node> broadcast;
    if (mode == ov::op::BroadcastType::NONE) {
        // Explicit axes mapping goes in as a third constant input.
        // NOTE: ov::op::BroadcastType::EXPLICIT is an alias of NONE, so the
        // explicit-mode test cases take this branch as well.
        auto axis_set_const = ov::op::v0::Constant::create(ov::element::i64, {axes_mapping.size()}, axes_mapping.to_vector());
        broadcast = std::make_shared<ov::op::v3::Broadcast>(params[0],
                                                            target_shape_const,
                                                            axis_set_const,
                                                            mode);
    } else { // NUMPY / BIDIRECTIONAL modes need no axes mapping
        broadcast = std::make_shared<ov::op::v3::Broadcast>(params[0],
                                                            target_shape_const,
                                                            mode);
    }

    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(broadcast)};
    function = std::make_shared<ov::Model>(results, params, "BroadcastInference");
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,161 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_op/gru_sequence.hpp"
#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp"
#include "openvino/pass/manager.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
namespace ov {
namespace test {
using ov::test::utils::InputLayerType;
using ov::test::utils::SequenceTestsMode;
using ngraph::helpers::is_tensor_iterator_exist;
// Produces a human-readable test-case name from the GRUSequence parameter tuple.
// Fix: removed the unused locals activations_alpha / activations_beta — they are
// not part of GRUSequenceParams and were never read.
std::string GRUSequenceTest::getTestCaseName(const testing::TestParamInfo<GRUSequenceParams> &obj) {
    std::vector<InputShape> shapes;
    SequenceTestsMode mode;
    std::vector<std::string> activations;
    float clip;
    bool linear_before_reset;
    ov::op::RecurrentSequenceDirection direction;
    InputLayerType WRBType;
    ov::element::Type type;
    std::string targetDevice;
    std::tie(mode, shapes, activations, clip, linear_before_reset, direction, WRBType,
             type, targetDevice) = obj.param;

    std::ostringstream result;
    result << "mode=" << mode << "_";
    result << "IS=(";
    for (const auto& shape : shapes) {
        result << ov::test::utils::partialShape2str({shape.first}) << "_";
    }
    result << ")_TS=(";
    for (const auto& shape : shapes) {
        for (const auto& item : shape.second) {
            result << ov::test::utils::vec2str(item) << "_";
        }
    }
    result << "linear_before_reset=" << linear_before_reset << "_";
    result << "activations=" << ov::test::utils::vec2str(activations) << "_";
    result << "direction=" << direction << "_";
    result << "WRBType=" << WRBType << "_";
    result << "clip=" << clip << "_";
    result << "IT=" << type.get_type_name() << "_";
    result << "targetDevice=" << targetDevice << "_";
    return result.str();
}
void GRUSequenceTest::SetUp() {
std::vector<InputShape> shapes;
std::vector<std::string> activations;
std::vector<float> activations_alpha;
std::vector<float> activations_beta;
float clip;
bool linear_before_reset;
ov::op::RecurrentSequenceDirection direction;
InputLayerType wbr_type;
std::tie(m_mode, shapes, activations, clip, linear_before_reset, direction, wbr_type,
inType, targetDevice) = this->GetParam();
outType = inType;
init_input_shapes(shapes);
if (inType == ElementType::bf16 || inType == ElementType::f16) {
rel_threshold = 1e-2;
}
const size_t seq_lengths = targetStaticShapes.front()[0][1];
const size_t hidden_size = targetStaticShapes.front()[1][2];
const size_t input_size = targetStaticShapes.front()[0][2];
const size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
const size_t batch = inputDynamicShapes[0][0].is_static() ? inputDynamicShapes[0][0].get_length() :
inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length() :
inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static() ? inputDynamicShapes[2][0].get_length() :
1lu;
ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes[0]),
std::make_shared<ov::op::v0::Parameter>(inType, inputDynamicShapes[1])};
const auto& w_shape = ov::Shape{num_directions, 3 * hidden_size, input_size};
const auto& r_shape = ov::Shape{num_directions, 3 * hidden_size, hidden_size};
const auto& b_shape = ov::Shape{num_directions, (linear_before_reset ? 4 : 3) * hidden_size};
std::shared_ptr<ov::Node> seq_lengths_node;
if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM ||
m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM ||
m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) {
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[2]);
param->set_friendly_name("seq_lengths");
params.push_back(param);
seq_lengths_node = param;
} else if (m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST ||
m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) {
auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, targetStaticShapes[0][2], seq_lengths, 0);
seq_lengths_node = std::make_shared<ov::op::v0::Constant>(tensor);
} else {
std::vector<int64_t> lengths(batch, seq_lengths);
seq_lengths_node = std::make_shared<ov::op::v0::Constant>(ov::element::i64, targetStaticShapes[0][2], lengths);
}
std::shared_ptr<ov::Node> w, r, b;
if (wbr_type == InputLayerType::PARAMETER) {
const auto w_param = std::make_shared<ov::op::v0::Parameter>(inType, w_shape);
const auto r_param = std::make_shared<ov::op::v0::Parameter>(inType, r_shape);
const auto b_param = std::make_shared<ov::op::v0::Parameter>(inType, b_shape);
w = w_param;
r = r_param;
b = b_param;
params.push_back(w_param);
params.push_back(r_param);
params.push_back(b_param);
} else {
auto tensor_w = ov::test::utils::create_and_fill_tensor(inType, w_shape);
w = std::make_shared<ov::op::v0::Constant>(tensor_w);
auto tensor_R = ov::test::utils::create_and_fill_tensor(inType, r_shape);
r = std::make_shared<ov::op::v0::Constant>(tensor_R);
auto tensor_B = ov::test::utils::create_and_fill_tensor(inType, b_shape);
b = std::make_shared<ov::op::v0::Constant>(tensor_B);
}
auto gru_sequence = std::make_shared<ov::op::v5::GRUSequence>(params[0], params[1], seq_lengths_node, w, r, b, hidden_size, direction,
activations, activations_alpha, activations_beta, clip, linear_before_reset);
ov::OutputVector results{std::make_shared<ov::op::v0::Result>(gru_sequence->output(0)),
std::make_shared<ov::op::v0::Result>(gru_sequence->output(1))};
function = std::make_shared<ov::Model>(results, params, "gru_sequence");
bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ ||
m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM ||
m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST);
if (!is_pure_sequence) {
ov::pass::Manager manager;
if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL)
manager.register_pass<ov::pass::BidirectionalGRUSequenceDecomposition>();
manager.register_pass<ov::pass::ConvertGRUSequenceToTensorIterator>();
manager.run_passes(function);
bool ti_found = is_tensor_iterator_exist(function);
EXPECT_EQ(ti_found, true);
} else {
bool ti_found = is_tensor_iterator_exist(function);
EXPECT_EQ(ti_found, false);
}
}
// Generates one input tensor per model Parameter for the current static shapes.
// m_max_seq_len is used as the fill range (starting from 0) for every tensor,
// including the seq_lengths input, so generated lengths stay within the data's
// sequence dimension.
//
// Fixes: loop index is size_t (the original `int i < params.size()` mixed
// signed/unsigned), and the ParameterVector is taken by const reference instead
// of being copied.
void GRUSequenceTest::generate_inputs(const std::vector<ov::Shape>& target_input_static_shapes) {
    inputs.clear();
    const auto& params = function->get_parameters();
    OPENVINO_ASSERT(target_input_static_shapes.size() >= params.size());
    for (size_t i = 0; i < params.size(); ++i) {
        auto tensor = ov::test::utils::create_and_fill_tensor(params[i]->get_element_type(),
                                                              target_input_static_shapes[i],
                                                              m_max_seq_len,
                                                              0);
        inputs.insert({params[i], tensor});
    }
}
} // namespace test
} // namespace ov

View File

@ -4,15 +4,42 @@
#pragma once
#include <ostream>
namespace ov {
namespace test {
namespace utils {
enum ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL };
enum class ComparisonTypes { EQUAL, NOT_EQUAL, IS_FINITE, IS_INF, IS_NAN, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL };
enum ConversionTypes { CONVERT, CONVERT_LIKE };
enum class ConversionTypes { CONVERT, CONVERT_LIKE };
enum ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 };
enum class ReductionType { Mean, Max, Min, Prod, Sum, LogicalOr, LogicalAnd, L1, L2 };
// Whether a secondary model input (e.g. the W/R/B weights of a recurrent
// sequence test) is built as a Constant or exposed as a Parameter.
enum class InputLayerType {
    CONSTANT,
    PARAMETER,
};

// How a recurrent-sequence single-op test instantiates the model:
//  - PURE_SEQ*: keep the native Sequence op (optionally with random or
//    Parameter-supplied sequence lengths);
//  - CONVERT_TO_TI_*: decompose the Sequence op into a TensorIterator before
//    inference (MAX/RAND refers to how seq_lengths are produced, CONST/PARAM to
//    how they are fed).
enum class SequenceTestsMode {
    PURE_SEQ,
    PURE_SEQ_RAND_SEQ_LEN_CONST,
    PURE_SEQ_RAND_SEQ_LEN_PARAM,
    CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
    CONVERT_TO_TI_MAX_SEQ_LEN_PARAM,
    CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
    CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
};
std::ostream& operator<<(std::ostream& os, const ComparisonTypes type);
std::ostream& operator<<(std::ostream& os, const ConversionTypes type);
std::ostream& operator<<(std::ostream& os, const ReductionType type);
std::ostream& operator<<(std::ostream& os, const InputLayerType type);
std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type);
} // namespace utils
} // namespace test

View File

@ -0,0 +1,138 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_enums.hpp"
namespace ov {
namespace test {
namespace utils {
// Streams the human-readable name of a ComparisonTypes value.
// Throws std::runtime_error for values outside the enumeration.
std::ostream& operator<<(std::ostream& os, const ComparisonTypes type) {
    const char* label = nullptr;
    switch (type) {
    case ComparisonTypes::EQUAL:         label = "Equal";        break;
    case ComparisonTypes::NOT_EQUAL:     label = "NotEqual";     break;
    case ComparisonTypes::GREATER:       label = "Greater";      break;
    case ComparisonTypes::GREATER_EQUAL: label = "GreaterEqual"; break;
    case ComparisonTypes::IS_FINITE:     label = "IsFinite";     break;
    case ComparisonTypes::IS_INF:        label = "IsInf";        break;
    case ComparisonTypes::IS_NAN:        label = "IsNaN";        break;
    case ComparisonTypes::LESS:          label = "Less";         break;
    case ComparisonTypes::LESS_EQUAL:    label = "LessEqual";    break;
    default:
        throw std::runtime_error("NOT_SUPPORTED_COMPARISON_TYPE");
    }
    return os << label;
}
// Streams the human-readable name of a ConversionTypes value.
// Throws std::runtime_error for values outside the enumeration.
std::ostream& operator<<(std::ostream& os, const ConversionTypes type) {
    const char* label = nullptr;
    switch (type) {
    case ConversionTypes::CONVERT:      label = "Convert";     break;
    case ConversionTypes::CONVERT_LIKE: label = "ConvertLike"; break;
    default:
        throw std::runtime_error("NOT_SUPPORTED_CONVERSION_TYPE");
    }
    return os << label;
}
// Streams the human-readable name of a ReductionType value.
// Note: L1/L2 print as "ReduceL1"/"ReduceL2"; all other values print verbatim.
// Throws std::runtime_error for values outside the enumeration.
std::ostream& operator<<(std::ostream& os, const ReductionType type) {
    const char* label = nullptr;
    switch (type) {
    case ReductionType::Mean:       label = "Mean";       break;
    case ReductionType::Max:        label = "Max";        break;
    case ReductionType::Min:        label = "Min";        break;
    case ReductionType::Prod:       label = "Prod";       break;
    case ReductionType::Sum:        label = "Sum";        break;
    case ReductionType::LogicalOr:  label = "LogicalOr";  break;
    case ReductionType::LogicalAnd: label = "LogicalAnd"; break;
    case ReductionType::L1:         label = "ReduceL1";   break;
    case ReductionType::L2:         label = "ReduceL2";   break;
    default:
        throw std::runtime_error("NOT_SUPPORTED_REDUCTION_TYPE");
    }
    return os << label;
}
// Streams the human-readable name of an InputLayerType value.
// Throws std::runtime_error for values outside the enumeration.
std::ostream& operator<<(std::ostream& os, const InputLayerType type) {
    const char* label = nullptr;
    switch (type) {
    case InputLayerType::CONSTANT:  label = "CONSTANT";  break;
    case InputLayerType::PARAMETER: label = "PARAMETER"; break;
    default:
        throw std::runtime_error("NOT_SUPPORTED_INPUT_LAYER_TYPE");
    }
    return os << label;
}
// Streams the human-readable name of a SequenceTestsMode value.
// Throws std::runtime_error for values outside the enumeration.
std::ostream& operator<<(std::ostream& os, const SequenceTestsMode type) {
    const char* label = nullptr;
    switch (type) {
    case SequenceTestsMode::PURE_SEQ:                        label = "PURE_SEQ";                        break;
    case SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST:     label = "PURE_SEQ_RAND_SEQ_LEN_CONST";     break;
    case SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM:     label = "PURE_SEQ_RAND_SEQ_LEN_PARAM";     break;
    case SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM: label = "CONVERT_TO_TI_RAND_SEQ_LEN_PARAM"; break;
    case SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST: label = "CONVERT_TO_TI_RAND_SEQ_LEN_CONST"; break;
    case SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM:  label = "CONVERT_TO_TI_MAX_SEQ_LEN_PARAM";  break;
    case SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST:  label = "CONVERT_TO_TI_MAX_SEQ_LEN_CONST";  break;
    default:
        throw std::runtime_error("NOT_SUPPORTED_OP_TYPE");
    }
    return os << label;
}
} // namespace utils
} // namespace test
} // namespace ov