Refactor LoopLayerTest, LrnLayerTest, LSTMSequenceTest, LSTMCellTest (#20269)
* Refactor LoopLayerTest
* Refactor LrnLayerTest
* LSTMCellTest
* Refactor LSTMSequenceTest

parent 0060b26b74
commit 246d8efada
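The same migration pattern repeats across all four suites. Condensed from the hunks below into a minimal before/after sketch (identifiers are taken verbatim from the diff; surrounding test code is elided), the per-file hunks follow:

// Before: legacy fixture and InferenceEngine precisions
#include "single_layer_tests/loop.hpp"
using namespace LayerTestsDefinitions;
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                         InferenceEngine::Precision::FP16};

// After: ov::test fixture, ov::element types, static shapes wrapped into InputShape
#include "single_op_tests/loop.hpp"
using ov::test::LoopLayerTest;
std::vector<ov::element::Type> model_types = {ov::element::f32, ov::element::f16};
std::vector<ov::Shape> input_shapes_static = {{32, 1, 10}};
// INSTANTIATE_TEST_SUITE_P now combines model_types and
// ov::test::static_shapes_to_test_representation(input_shapes_static)
// instead of netPrecisions and raw size vectors.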
@@ -2,67 +2,37 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "single_layer_tests/loop.hpp"
#include "single_op_tests/loop.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
using ov::test::LoopLayerTest;
using ov::test::LOOP_IN_TYPE;

// without clip values increase rapidly, so use only seq_lengths = 2
std::vector<bool> execute_first_iteration{true};
std::vector<bool> is_body_condition_const{true/*, false*/};
std::vector<bool> body_condition{true/*, false*/}; // works only if is_body_condition_const == true
std::vector<int64_t> trip_count{1, 10/*, -1*/}; // -1 means infinity
std::vector<std::vector<std::pair<std::vector<size_t>, LOOP_IN_TYPE>>> inputs = {
    {{{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::INVARIANT}, {{32, 1, 10}, LOOP_IN_TYPE::MERGED}},
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                         InferenceEngine::Precision::FP16};
std::vector<ov::Shape> input_shapes_static = {{32, 1, 10}};

INSTANTIATE_TEST_SUITE_P(smoke_LoopCommonZeroClip, LoopTest,
std::vector<std::vector<LOOP_IN_TYPE>> inputs_types = {
    {LOOP_IN_TYPE::INVARIANT},
    {LOOP_IN_TYPE::MERGED}};

std::vector<ov::element::Type> model_types = {
    ov::element::f32,
    ov::element::f16};

INSTANTIATE_TEST_SUITE_P(smoke_LoopCommonZeroClip, LoopLayerTest,
        ::testing::Combine(
            ::testing::ValuesIn(execute_first_iteration),
            ::testing::ValuesIn(is_body_condition_const),
            ::testing::ValuesIn(body_condition),
            ::testing::ValuesIn(trip_count),
            ::testing::ValuesIn(inputs),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_static)),
            ::testing::ValuesIn(inputs_types),
            ::testing::ValuesIn(model_types),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LoopTest::getTestCaseName);

static const std::vector<std::tuple<bool, int64_t, int64_t, int64_t>> static_loop_types {
    // GCC4.8 limitation: have to specify type of each element in list
    //                                          static_trip_count | max | dynamic_exit | axis
    std::tuple<bool, int64_t, int64_t, int64_t>{  true ,  5, -1, -1 },  // n_iter 5, no dynamic exit
    std::tuple<bool, int64_t, int64_t, int64_t>{  true ,  5,  3, -1 },  // n_iter 3, dynamic exit on 3
    std::tuple<bool, int64_t, int64_t, int64_t>{  true ,  5,  7, -1 },  // n_iter 5, dynamic exit not reached
    std::tuple<bool, int64_t, int64_t, int64_t>{  true , -1,  5, -1 },  // n_iter 5, inf loop with dynamic exit on 5
    std::tuple<bool, int64_t, int64_t, int64_t>{  true ,  5, -1,  1 },  // n_iter 5, const for loop with auto concatenated out
    std::tuple<bool, int64_t, int64_t, int64_t>{ false ,  5, -1, -1 },  // |
    std::tuple<bool, int64_t, int64_t, int64_t>{ false ,  5,  3, -1 },  // | same with dynamic trip count
    std::tuple<bool, int64_t, int64_t, int64_t>{ false ,  5,  7, -1 },  // |
    std::tuple<bool, int64_t, int64_t, int64_t>{ false , -1,  5, -1 }   // |
};

using namespace testing;
using namespace InferenceEngine;

INSTANTIATE_TEST_SUITE_P(smoke_StaticShapeLoop, StaticShapeLoopTest,
        Combine(
            ValuesIn(std::vector<bool>{true, false}),
            Values(true),
            ValuesIn(static_loop_types),
            Values<int64_t>(7),
            Values<InferenceEngine::SizeVector>({2, 1, 4}),
            Values<InferenceEngine::Precision>(Precision::FP32, Precision::I32),
            Values(ov::test::utils::DEVICE_CPU),
            Values<std::map<std::string, std::string>>({})));
using namespace testing;
INSTANTIATE_TEST_SUITE_P(smoke_TrivialLoop, TrivialLoopTest,
        Combine(
            Values<InferenceEngine::Precision>(Precision::FP32, Precision::I32),
            Values<InferenceEngine::SizeVector>({2, 3, 4}),
            Values(ov::test::utils::DEVICE_CPU)));

        LoopLayerTest::getTestCaseName);
} // namespace
@@ -2,94 +2,88 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/lrn.hpp"

#include <vector>

#include "single_op_tests/lrn.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
namespace {
using ov::test::LrnLayerTest;

const std::vector<InferenceEngine::Precision> netPrecisions{
    InferenceEngine::Precision::FP32
};
// common values

const std::vector<ov::element::Type> model_types{ov::element::f32};
const double alpha = 9.9e-05;
const double beta = 2;
const double bias = 1.0;
const size_t size = 5;

namespace LRN2D {
// 2D

const std::vector<std::vector<int64_t>> axes = {{1}};
const std::vector<std::vector<int64_t>> axes_2d = {{1}};
std::vector<ov::Shape> input_shapes_2d_static = {{10, 16}};

INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_2D, LrnLayerTest,
        ::testing::Combine(::testing::Values(alpha),
                           ::testing::Values(beta),
                           ::testing::Values(bias),
                           ::testing::Values(size),
                           ::testing::ValuesIn(axes),
                           ::testing::ValuesIn(netPrecisions),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(std::vector<size_t>({10, 16})),
                           ::testing::ValuesIn(axes_2d),
                           ::testing::ValuesIn(model_types),
                           ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)),
                           ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LrnLayerTest::getTestCaseName);

} // namespace LRN2D

namespace LRN3D {
// 3D

const std::vector<std::vector<int64_t>> axes = {{1}, {2}};
const std::vector<std::vector<int64_t>> axes_3d = {{1}, {2}};
std::vector<ov::Shape> input_shapes_3d_static = {{6, 10, 16}};

INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_3D, LrnLayerTest,
        ::testing::Combine(::testing::Values(alpha),
                           ::testing::Values(beta),
                           ::testing::Values(bias),
                           ::testing::Values(size),
                           ::testing::ValuesIn(axes),
                           ::testing::ValuesIn(netPrecisions),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(std::vector<size_t>({6, 10, 16})),
                           ::testing::ValuesIn(axes_3d),
                           ::testing::ValuesIn(model_types),
                           ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)),
                           ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LrnLayerTest::getTestCaseName);

} // namespace LRN3D

namespace LRN4D {

const std::vector<std::vector<int64_t>> axes = {{1}, {2, 3}, {3, 2}};
// 4D

const std::vector<std::vector<int64_t>> axes_4d = {{1}, {2, 3}, {3, 2}};
std::vector<ov::Shape> input_shapes_4d_static = {{10, 10, 3, 8}};

INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_4D, LrnLayerTest,
        ::testing::Combine(::testing::Values(alpha),
                           ::testing::Values(beta),
                           ::testing::Values(bias),
                           ::testing::Values(size),
                           ::testing::ValuesIn(axes),
                           ::testing::ValuesIn(netPrecisions),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(std::vector<size_t>({10, 10, 3, 8})),
                           ::testing::ValuesIn(axes_4d),
                           ::testing::ValuesIn(model_types),
                           ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_4d_static)),
                           ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LrnLayerTest::getTestCaseName);

} // namespace LRN4D

namespace LRN5D {

const std::vector<std::vector<int64_t>> axes = {{1}, {2, 3, 4}, {4, 2, 3}};
// 5D

const std::vector<std::vector<int64_t>> axes_5d = {{1}, {2, 3, 4}, {4, 2, 3}};
std::vector<ov::Shape> input_shapes_5d_static = {{1, 10, 10, 7, 4}};

INSTANTIATE_TEST_SUITE_P(smoke_LrnCheck_5D, LrnLayerTest,
        ::testing::Combine(::testing::Values(alpha),
                           ::testing::Values(beta),
                           ::testing::Values(bias),
                           ::testing::Values(size),
                           ::testing::ValuesIn(axes),
                           ::testing::ValuesIn(netPrecisions),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                           ::testing::Values(std::vector<size_t>({1, 10, 10, 7, 4})),
                           ::testing::ValuesIn(axes_5d),
                           ::testing::ValuesIn(model_types),
                           ::testing::Values(ov::test::static_shapes_to_test_representation(input_shapes_5d_static)),
                           ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LrnLayerTest::getTestCaseName);

} // namespace LRN5D
} // namespace
@@ -4,40 +4,45 @@

#include <vector>

#include "single_layer_tests/lstm_cell.hpp"
#include "single_op_tests/lstm_cell.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
std::vector<bool> should_decompose{false, true};
std::vector<size_t> batch{5};
std::vector<size_t> hidden_size{1, 10};
std::vector<size_t> input_size{1, 30};
std::vector<std::vector<std::string>> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"},
                                                     {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"},
                                                     {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}};
std::vector<float> clip{0.f, 0.7f};
std::vector<ngraph::helpers::InputLayerType> layer_types = {
    ngraph::helpers::InputLayerType::CONSTANT,
    ngraph::helpers::InputLayerType::PARAMETER
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                         InferenceEngine::Precision::FP16};
using ov::test::LSTMCellTest;
using ov::test::utils::InputLayerType;

INSTANTIATE_TEST_SUITE_P(smoke_LSTMCellCommon, LSTMCellTest,
        ::testing::Combine(
            ::testing::ValuesIn(should_decompose),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMCellTest::getTestCaseName);
std::vector<bool> should_decompose{false, true};
std::vector<size_t> batch{5};
std::vector<size_t> hidden_size{1, 10};
std::vector<size_t> input_size{1, 30};
std::vector<float> clip{0.f, 0.7f};

std::vector<std::vector<std::string>> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"},
                                                     {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"},
                                                     {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}};

std::vector<InputLayerType> layer_types = {
    InputLayerType::CONSTANT,
    InputLayerType::PARAMETER
};

std::vector<ov::element::Type> model_types = {
    ov::element::f32,
    ov::element::f16};

INSTANTIATE_TEST_SUITE_P(smoke_LSTMCellCommon, LSTMCellTest,
        ::testing::Combine(
            ::testing::ValuesIn(should_decompose),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(layer_types),
            ::testing::ValuesIn(model_types),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMCellTest::getTestCaseName);

} // namespace
@@ -2,81 +2,84 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <ngraph/op/util/attr_types.hpp>
#include "single_layer_tests/lstm_sequence.hpp"
#include "single_op_tests/lstm_sequence.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
std::vector<ngraph::helpers::SequenceTestsMode> mode{ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
                                                     ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
                                                     ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
                                                     ngraph::helpers::SequenceTestsMode::PURE_SEQ,
                                                     ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST,
                                                     ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM};
// output values increase rapidly without clip, so use only seq_lengths = 2
std::vector<size_t> seq_lengths_zero_clip{2};
std::vector<size_t> seq_lengths_clip_non_zero{20};
std::vector<size_t> batch{10};
std::vector<size_t> hidden_size{1, 10};
std::vector<size_t> input_size{10};
std::vector<std::vector<std::string>> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"},
                                                     {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"},
                                                     {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}};
std::vector<float> clip{0.f};
std::vector<float> clip_non_zeros{0.7f};
std::vector<ngraph::op::RecurrentSequenceDirection> direction = {ngraph::op::RecurrentSequenceDirection::FORWARD,
                                                                 ngraph::op::RecurrentSequenceDirection::REVERSE,
                                                                 ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL
};
std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                         InferenceEngine::Precision::FP16};
using ov::test::LSTMSequenceTest;
using ov::test::utils::SequenceTestsMode;
using ov::test::utils::InputLayerType;

INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClip, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::ValuesIn(mode),
            ::testing::ValuesIn(seq_lengths_zero_clip),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(direction),
            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);
std::vector<SequenceTestsMode> mode{
    SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST,
    SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST,
    SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM,
    SequenceTestsMode::PURE_SEQ,
    SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST,
    SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM};

INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClipNonconstantWRB, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::Values(ngraph::helpers::SequenceTestsMode::PURE_SEQ),
            ::testing::ValuesIn(seq_lengths_zero_clip),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(direction),
            ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);
// output values increase rapidly without clip, so use only seq_lengths = 2
std::vector<size_t> seq_lengths_zero_clip{2};
std::vector<size_t> seq_lengths_clip_non_zero{20};
std::vector<size_t> batch{10};
std::vector<size_t> hidden_size{1, 10};
std::vector<size_t> input_size{10};
std::vector<std::vector<std::string>> activations = {{"relu", "sigmoid", "tanh"}, {"sigmoid", "tanh", "tanh"},
                                                     {"tanh", "relu", "sigmoid"}, {"sigmoid", "sigmoid", "sigmoid"},
                                                     {"tanh", "tanh", "tanh"}, {"relu", "relu", "relu"}};
std::vector<float> clip{0.f};
std::vector<float> clip_non_zeros{0.7f};
std::vector<ov::op::RecurrentSequenceDirection> direction = {ov::op::RecurrentSequenceDirection::FORWARD,
                                                             ov::op::RecurrentSequenceDirection::REVERSE,
                                                             ov::op::RecurrentSequenceDirection::BIDIRECTIONAL
};
std::vector<ov::element::Type> model_types = {
    ov::element::f32,
    ov::element::f16};

INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonClip, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::ValuesIn(mode),
            ::testing::ValuesIn(seq_lengths_clip_non_zero),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip_non_zeros),
            ::testing::ValuesIn(direction),
            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
            ::testing::ValuesIn(netPrecisions),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClip, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::ValuesIn(mode),
            ::testing::ValuesIn(seq_lengths_zero_clip),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(direction),
            ::testing::Values(InputLayerType::CONSTANT),
            ::testing::ValuesIn(model_types),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonZeroClipNonconstantWRB, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::Values(SequenceTestsMode::PURE_SEQ),
            ::testing::ValuesIn(seq_lengths_zero_clip),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip),
            ::testing::ValuesIn(direction),
            ::testing::Values(InputLayerType::PARAMETER),
            ::testing::ValuesIn(model_types),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_LSTMSequenceCommonClip, LSTMSequenceTest,
        ::testing::Combine(
            ::testing::ValuesIn(mode),
            ::testing::ValuesIn(seq_lengths_clip_non_zero),
            ::testing::ValuesIn(batch),
            ::testing::ValuesIn(hidden_size),
            ::testing::ValuesIn(input_size),
            ::testing::ValuesIn(activations),
            ::testing::ValuesIn(clip_non_zeros),
            ::testing::ValuesIn(direction),
            ::testing::Values(InputLayerType::CONSTANT),
            ::testing::ValuesIn(model_types),
            ::testing::Values(ov::test::utils::DEVICE_CPU)),
        LSTMSequenceTest::getTestCaseName);

} // namespace
@@ -187,6 +187,8 @@ std::vector<std::string> disabledTestPatterns() {
        // Issue: 121313
        R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f16.*)",
        R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f32.*)",
        // Issue: 122177
        R"(smoke_LSTMSequenceCommon.*LSTMSequenceTest.Inference.*CONVERT_TO_TI.*)",
        // Issue: 122094
        R"(smoke_Interpolate_Basic_Down_Sample_Tail/InterpolateLayerTest.Inference.*(asymmetric|align_corners).*f16.*)",
    };
@@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_op/loop.hpp"

namespace ov {
namespace test {
TEST_P(LoopLayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov
@@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_op/lrn.hpp"

namespace ov {
namespace test {
TEST_P(LrnLayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov
@@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_op/lstm_cell.hpp"

namespace ov {
namespace test {
TEST_P(LSTMCellTest, Inference) {
    run();
};
} // namespace test
} // namespace ov
@@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_op/lstm_sequence.hpp"

namespace ov {
namespace test {
TEST_P(LSTMSequenceTest, Inference) {
    run();
};
} // namespace test
} // namespace ov
@@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {
enum LOOP_IN_TYPE {
    INVARIANT,
    MERGED
};

using LoopParams = typename std::tuple<
    bool,                       // ExecuteFirstIteration
    bool,                       // BodyCondition is a constant?
    bool,                       // BodyCondition value, if it is a Const
    int64_t,                    // TripCount, -1 means infinity
    std::vector<InputShape>,    // input shapes
    std::vector<LOOP_IN_TYPE>,  // input types. Vector size has to be equal to input shapes vector size
    ov::element::Type,          // Model type
    std::string>;               // Device name

class LoopLayerTest : public testing::WithParamInterface<LoopParams>,
                      virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<LoopParams> &obj);

protected:
    void SetUp() override;
};
} // namespace test
} // namespace ov
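For orientation, one concrete LoopParams value produced by the smoke_LoopCommonZeroClip instantiation earlier in this diff would look roughly like the sketch below. This is illustrative only; it assumes the same includes as the instantiation file (single_op_tests/loop.hpp and common_test_utils/test_constants.hpp) so that static_shapes_to_test_representation and DEVICE_CPU are visible:

using ov::test::LoopParams;
using ov::test::LOOP_IN_TYPE;

// Tuple element order follows the LoopParams alias above.
LoopParams example{
    true,    // ExecuteFirstIteration
    true,    // BodyCondition is a constant
    true,    // BodyCondition value
    10,      // TripCount
    ov::test::static_shapes_to_test_representation(std::vector<ov::Shape>{{32, 1, 10}}),
    std::vector<LOOP_IN_TYPE>{LOOP_IN_TYPE::MERGED},
    ov::element::f32,
    ov::test::utils::DEVICE_CPU};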
@@ -0,0 +1,35 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {
typedef std::tuple<
    double,                  // Alpha
    double,                  // Beta
    double,                  // Bias
    size_t,                  // Size
    std::vector<int64_t>,    // Reduction axes
    ov::element::Type,       // Network precision
    std::vector<InputShape>, // Input shapes
    std::string              // Device name
> lrnLayerTestParamsSet;

class LrnLayerTest
    : public testing::WithParamInterface<lrnLayerTestParamsSet>,
      virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<lrnLayerTestParamsSet>& obj);
protected:
    void SetUp() override;
};
} // namespace test
} // namespace ov
@@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"

namespace ov {
namespace test {
using LSTMCellParams = typename std::tuple<
    bool,                             // using decompose to sub-ops transformation
    size_t,                           // batch
    size_t,                           // hidden size
    size_t,                           // input size
    std::vector<std::string>,         // activations
    float,                            // clip
    ov::test::utils::InputLayerType,  // W input type (Constant or Parameter)
    ov::test::utils::InputLayerType,  // R input type (Constant or Parameter)
    ov::test::utils::InputLayerType,  // B input type (Constant or Parameter)
    ov::element::Type,                // Network precision
    std::string>;                     // Device name

class LSTMCellTest : public testing::WithParamInterface<LSTMCellParams>,
                     virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<LSTMCellParams> &obj);
protected:
    void SetUp() override;
};

} // namespace test
} // namespace ov
@@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>
#include <vector>

#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"

namespace ov {
namespace test {
using LSTMSequenceParams = typename std::tuple<
    ov::test::utils::SequenceTestsMode,  // pure Sequence or TensorIterator
    size_t,                              // seq_lengths
    size_t,                              // batch
    size_t,                              // hidden size
    size_t,                              // input size
    std::vector<std::string>,            // activations
    float,                               // clip
    ov::op::RecurrentSequenceDirection,  // direction
    ov::test::utils::InputLayerType,     // WRB input type (Constant or Parameter)
    ov::element::Type,                   // Network precision
    std::string>;                        // Device name


class LSTMSequenceTest : public testing::WithParamInterface<LSTMSequenceParams>,
                         virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<LSTMSequenceParams> &obj);
protected:
    void SetUp() override;
};
} // namespace test
} // namespace ov
@@ -632,7 +632,7 @@ ov::runtime::Tensor generate(const std::shared_ptr<ngraph::op::v5::HSigmoid>& no
    return Activation::generate(elemType, targetShape);
}

ov::runtime::Tensor generate(const std::shared_ptr<ngraph::op::v5::LSTMSequence>& node,
ov::runtime::Tensor generate(const std::shared_ptr<ov::op::v5::LSTMSequence>& node,
                             size_t port,
                             const ov::element::Type& elemType,
                             const ov::Shape& targetShape) {
@@ -640,6 +640,10 @@ ov::runtime::Tensor generate(const std::shared_ptr<ngraph::op::v5::LSTMSequence>
        unsigned int m_max_seq_len = 10;
        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, m_max_seq_len, 0);
    }
    if (port == 3 && node->input(0).get_partial_shape().is_static()) {
        auto seq_len = node->input(0).get_shape()[1];
        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, seq_len);
    }
    return generate(std::dynamic_pointer_cast<ov::Node>(node), port, elemType, targetShape);
}
src/tests/functional/shared_test_classes/src/single_op/loop.cpp (new file, 125 lines)
@@ -0,0 +1,125 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_op/loop.hpp"

#include "transformations/control_flow/unroll_tensor_iterator.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/loop.hpp"

namespace ov {
namespace test {
std::string LoopLayerTest::getTestCaseName(const testing::TestParamInfo<LoopParams> &obj) {
    bool execute_first_iteration;
    bool is_body_condition_const;
    bool body_condition; // works only if is_body_condition_const == true
    int64_t trip_count;
    std::vector<InputShape> shapes;
    std::vector<LOOP_IN_TYPE> input_types;
    ov::element::Type model_type;
    std::string targetDevice;
    std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, shapes, input_types, model_type,
             targetDevice) = obj.param;

    std::ostringstream result;
    result << "IS=(";
    for (size_t i = 0lu; i < shapes.size(); i++) {
        result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : "");
    }
    result << ")_TS=";
    for (size_t i = 0lu; i < shapes.front().second.size(); i++) {
        result << "{";
        for (size_t j = 0lu; j < shapes.size(); j++) {
            result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : "");
        }
        result << "}_";
    }
    result << "execute_first_iteration" << execute_first_iteration << "_";
    result << "is_body_condition_const=" << is_body_condition_const << "_";
    result << "body_condition=" << body_condition << "_";
    result << "trip_count=" << trip_count << "_";
    result << "types=" << ov::test::utils::vec2str(input_types) << "_";
    result << "modelType=" << model_type.get_type_name() << "_";
    result << "targetDevice=" << targetDevice << "_";
    auto res_str = result.str();
    std::replace(res_str.begin(), res_str.end(), '-', '_');
    return res_str;
}

void LoopLayerTest::SetUp() {
    bool execute_first_iteration;
    bool is_body_condition_const;
    bool body_condition; // works only if is_body_condition_const == true
    int64_t trip_count;
    std::vector<InputShape> shapes;
    std::vector<LOOP_IN_TYPE> input_types;
    ov::element::Type model_type;
    std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, shapes, input_types, model_type,
             targetDevice) = this->GetParam();
    init_input_shapes(shapes);

    // Example:
    /* auto X = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{32, 1, 10});
       auto Y = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{32, 1, 10});
       auto M = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{32, 1, 10});*/
    ov::ParameterVector params;
    for (auto&& shape : inputDynamicShapes) {
        params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
    }

    // Example:
    /* auto Xi = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
       auto Yi = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
       auto M_body = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());*/

    ov::ParameterVector body_params;
    for (int i = 0; i < inputDynamicShapes.size(); i++) {
        body_params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, ov::PartialShape::dynamic()));
    }

    std::shared_ptr<ov::Node> body_condition_const;
    if (is_body_condition_const) {
        body_condition_const = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, body_condition);
    }
    auto trip_count_const = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{1}, trip_count);
    auto exec_condition = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, execute_first_iteration);

    // Body
    std::shared_ptr<ov::Node> Zo = body_params[0];
    for (int i = 1; i < body_params.size(); ++i) {
        Zo = std::make_shared<ov::op::v1::Add>(body_params[i], Zo);
    }

    auto body = std::make_shared<ov::Model>(ov::OutputVector{body_condition_const, Zo}, body_params);

    auto loop = std::make_shared<ov::op::v5::Loop>(trip_count_const, exec_condition);
    loop->set_function(body);
    loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});

    for (int i = 0; i < body_params.size(); ++i) {
        if (input_types[i] == LOOP_IN_TYPE::INVARIANT) {
            loop->set_invariant_input(body_params[i], params[i]);
        } else if (input_types[i] == LOOP_IN_TYPE::MERGED) {
            // todo: support several merged inputs
            // now supported only one in this sample
            loop->set_merged_input(body_params[i], params[i], Zo);
        }
    }

    // Output 0 is last Zo
    auto out0 = loop->get_iter_value(body_condition_const, -1);
    auto out1 = loop->get_iter_value(Zo, -1);
    // Output 1 is concat of Zos
    // start=0, stride=1, part_size=1, end=-1, axis=1
    auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);

    auto result0 = std::make_shared<ov::op::v0::Result>(out0);
    auto result1 = std::make_shared<ov::op::v0::Result>(out1);
    auto result2 = std::make_shared<ov::op::v0::Result>(out2);
    function = std::make_shared<ov::Model>(ov::ResultVector{result0, result1, result2}, params, "loop");
}
} // namespace test
} // namespace ov
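As a reading aid for the SetUp() above (the carried-over comments label the outputs slightly out of order), this is what the constructed Loop exposes, restated here as C++ comments only; all names match the code in this file:

// Body per iteration: Zo = body_params[0] + body_params[1] + ... (chain of ov::op::v1::Add).
// MERGED inputs feed the previous iteration's Zo back into the body;
// INVARIANT inputs are passed through unchanged on every iteration.
//
// Outputs wired into the test model:
//   out0 = loop->get_iter_value(body_condition_const, -1)    // body condition value from the last iteration
//   out1 = loop->get_iter_value(Zo, -1)                      // Zo from the last iteration
//   out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1) // per-iteration Zo values concatenated along axis 1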
@@ -0,0 +1,66 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_op/lrn.hpp"

#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/lrn.hpp"

namespace ov {
namespace test {
std::string LrnLayerTest::getTestCaseName(const testing::TestParamInfo<lrnLayerTestParamsSet>& obj) {
    double alpha, beta, bias;
    size_t size;
    std::vector<int64_t> axes;
    ov::element::Type model_type;
    std::vector<InputShape> shapes;
    std::string targetDevice;
    std::tie(alpha, beta, bias, size, axes, model_type, shapes, targetDevice) = obj.param;

    std::ostringstream result;
    const char separator = '_';
    result << "IS=(";
    for (size_t i = 0lu; i < shapes.size(); i++) {
        result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : "");
    }
    result << ")_TS=";
    for (size_t i = 0lu; i < shapes.front().second.size(); i++) {
        result << "{";
        for (size_t j = 0lu; j < shapes.size(); j++) {
            result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : "");
        }
        result << "}_";
    }
    result << "Alpha=" << alpha << separator;
    result << "Beta=" << beta << separator;
    result << "Bias=" << bias << separator;
    result << "Size=" << size << separator;
    result << "Axes=" << ov::test::utils::vec2str(axes) << separator;
    result << "netPRC=" << model_type.get_type_name() << separator;
    result << "trgDev=" << targetDevice;

    return result.str();
}

void LrnLayerTest::SetUp() {
    ov::element::Type model_type;
    std::vector<InputShape> shapes;
    double alpha, beta, bias;
    size_t size;
    std::vector<int64_t> axes;
    std::tie(alpha, beta, bias, size, axes, model_type, shapes, targetDevice) = GetParam();
    init_input_shapes(shapes);

    auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
    auto axes_node = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes.data());

    auto lrn = std::make_shared<ov::op::v0::LRN>(param, axes_node, alpha, beta, bias, size);

    auto result = std::make_shared<ov::op::v0::Result>(lrn);
    function = std::make_shared<ngraph::Function>(result, ov::ParameterVector{param}, "lrn");
}
} // namespace test
} // namespace ov
@@ -0,0 +1,141 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_op/lstm_cell.hpp"

#include "common_test_utils/ov_tensor_utils.hpp"
#include "transformations/op_conversions/lstm_cell_decomposition.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/lstm_cell.hpp"

namespace ov {
namespace test {
using ov::test::utils::InputLayerType;

std::string LSTMCellTest::getTestCaseName(const testing::TestParamInfo<LSTMCellParams> &obj) {
    bool should_decompose;
    size_t batch;
    size_t hidden_size;
    size_t input_size;
    std::vector<std::string> activations;
    std::vector<float> activations_alpha;
    std::vector<float> activations_beta;
    float clip;
    InputLayerType WType;
    InputLayerType RType;
    InputLayerType BType;
    ov::element::Type model_type;
    std::string targetDevice;
    std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType,
             model_type, targetDevice) = obj.param;
    std::vector<std::vector<size_t>> input_shapes = {
        {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size},
         {4 * hidden_size, hidden_size}, {4 * hidden_size}},
    };
    std::ostringstream result;
    result << "decomposition" << should_decompose << "_";
    result << "batch=" << batch << "_";
    result << "hidden_size=" << hidden_size << "_";
    result << "input_size=" << input_size << "_";
    result << "IS=" << ov::test::utils::vec2str(input_shapes) << "_";
    result << "activations=" << ov::test::utils::vec2str(activations) << "_";
    result << "clip=" << clip << "_";
    result << "WType=" << WType << "_";
    result << "RType=" << RType << "_";
    result << "BType=" << BType << "_";
    result << "modelType=" << model_type.get_type_name() << "_";
    result << "targetDevice=" << targetDevice << "_";
    return result.str();
}

void LSTMCellTest::SetUp() {
    bool should_decompose;
    size_t batch;
    size_t hidden_size;
    size_t input_size;
    std::vector<std::string> activations;
    std::vector<float> activations_alpha;
    std::vector<float> activations_beta;
    float clip;
    InputLayerType WType;
    InputLayerType RType;
    InputLayerType BType;
    ov::element::Type model_type;
    std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType,
             model_type, targetDevice) = this->GetParam();

    std::vector<ov::Shape> input_shapes = {
        {batch, input_size},
        {batch, hidden_size},
        {batch, hidden_size},
        {4 * hidden_size, input_size},
        {4 * hidden_size, hidden_size},
        {4 * hidden_size}
    };

    std::vector<ov::Shape> param_shapes{input_shapes[0], input_shapes[1], input_shapes[2]};
    if (WType == InputLayerType::PARAMETER)
        param_shapes.push_back(input_shapes[3]);

    if (RType == InputLayerType::PARAMETER)
        param_shapes.push_back(input_shapes[4]);

    if (BType == InputLayerType::PARAMETER)
        param_shapes.push_back(input_shapes[5]);
    init_input_shapes(ov::test::static_shapes_to_test_representation(param_shapes));

    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0]),
                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]),
                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[2])};

    ov::NodeVector inputs{params[0], params[1], params[2]};
    if (WType == InputLayerType::PARAMETER) {
        auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[params.size()]);
        params.push_back(param);
        inputs.push_back(param);
    } else {
        auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[3]);
        auto constant = std::make_shared<ov::op::v0::Constant>(tensor);
        inputs.push_back(constant);
    }

    if (RType == InputLayerType::PARAMETER) {
        auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[params.size()]);
        params.push_back(param);
        inputs.push_back(param);
    } else {
        auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[4]);
        auto constant = std::make_shared<ov::op::v0::Constant>(tensor);
        inputs.push_back(constant);
    }

    if (BType == InputLayerType::PARAMETER) {
        auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[params.size()]);
        params.push_back(param);
        inputs.push_back(param);
    } else {
        auto tensor = ov::test::utils::create_and_fill_tensor(model_type, input_shapes[5]);
        auto constant = std::make_shared<ov::op::v0::Constant>(tensor);
        inputs.push_back(constant);
    }

    auto lstm_cell = std::make_shared<ov::op::v4::LSTMCell>(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], inputs[5],
                                                            hidden_size, activations,
                                                            activations_alpha, activations_beta, clip);

    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(lstm_cell->output(0)),
                             std::make_shared<ov::op::v0::Result>(lstm_cell->output(1))};
    function = std::make_shared<ov::Model>(results, params, "lstm_cell");
    if (should_decompose) {
        ov::pass::Manager m;
        m.register_pass<ov::pass::LSTMCellDecomposition>();
        m.run_passes(function);
    }
}
} // namespace test
} // namespace ov
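A quick note on the {4 * hidden_size, ...} weight shapes built in SetUp() above: LSTMCell packs its four gates along the first axis of W, R and B, so those dimensions scale with 4 * hidden_size. A small sketch of the resulting shape arithmetic for one combination from the smoke suite (batch = 5, hidden_size = 10, input_size = 30; the four-gate stacking is the only fact added beyond the code above, and the gate ordering is deliberately not stated here):

// ov::op::v4::LSTMCell input shapes for batch=5, hidden_size=10, input_size=30:
//   X                    : {batch, input_size}           -> {5, 30}
//   initial hidden state : {batch, hidden_size}          -> {5, 10}
//   initial cell state   : {batch, hidden_size}          -> {5, 10}
//   W (4 gates stacked)  : {4 * hidden_size, input_size}  -> {40, 30}
//   R (4 gates stacked)  : {4 * hidden_size, hidden_size} -> {40, 10}
//   B (4 gates stacked)  : {4 * hidden_size}              -> {40}
ov::Shape W_shape{4 * 10, 30};  // {40, 30}
ov::Shape R_shape{4 * 10, 10};  // {40, 10}
ov::Shape B_shape{4 * 10};      // {40}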
@@ -0,0 +1,175 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "shared_test_classes/single_op/lstm_sequence.hpp"

#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp"
#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp"
#include "openvino/pass/visualize_tree.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/lstm_sequence.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "ov_models/utils/ov_helpers.hpp"


namespace ov {
namespace test {
using ov::test::utils::SequenceTestsMode;
using ov::test::utils::InputLayerType;

std::string LSTMSequenceTest::getTestCaseName(const testing::TestParamInfo<LSTMSequenceParams> &obj) {
    SequenceTestsMode mode;
    size_t seq_lengths;
    size_t batch;
    size_t hidden_size;
    size_t input_size;
    std::vector<std::string> activations;
    std::vector<float> activations_alpha;
    std::vector<float> activations_beta;
    float clip;
    ov::op::RecurrentSequenceDirection direction;
    InputLayerType WRBType;
    ov::element::Type model_type;
    std::string targetDevice;
    std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction,
             WRBType, model_type, targetDevice) = obj.param;
    std::vector<std::vector<size_t>> input_shapes = {
        {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size},
         {4 * hidden_size, hidden_size}, {4 * hidden_size}},
    };
    std::ostringstream result;
    result << "mode=" << mode << "_";
    result << "seq_lengths=" << seq_lengths << "_";
    result << "batch=" << batch << "_";
    result << "hidden_size=" << hidden_size << "_";
    result << "input_size=" << input_size << "_";
    result << "IS=" << ov::test::utils::vec2str(input_shapes) << "_";
    result << "activations=" << ov::test::utils::vec2str(activations) << "_";
    result << "direction=" << direction << "_";
    result << "clip=" << clip << "_";
    result << "WRBType=" << WRBType << "_";
    result << "modelType=" << model_type.get_type_name() << "_";
    result << "targetDevice=" << targetDevice << "_";
    return result.str();
}

void LSTMSequenceTest::SetUp() {
    SequenceTestsMode mode;
    size_t seq_lengths;
    size_t batch;
    size_t hidden_size;
    size_t input_size;
    std::vector<std::string> activations;
    std::vector<float> activations_alpha;
    std::vector<float> activations_beta;
    float clip;
    ov::op::RecurrentSequenceDirection direction;
    InputLayerType WRBType;
    ov::element::Type model_type;
    std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction,
             WRBType, model_type, targetDevice) = this->GetParam();
    size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
    std::vector<ov::Shape> inputShapes = {
        {batch, seq_lengths, input_size},
        {batch, num_directions, hidden_size},
        {batch, num_directions, hidden_size},
        {batch},
        {num_directions, 4 * hidden_size, input_size},
        {num_directions, 4 * hidden_size, hidden_size},
        {num_directions, 4 * hidden_size},
    };

    const auto& W_shape = inputShapes[4];
    const auto& R_shape = inputShapes[5];
    const auto& B_shape = inputShapes[6];

    std::vector<ov::Shape> param_shapes{inputShapes[0], inputShapes[1], inputShapes[2]};
    std::vector<ov::Shape> const_input_shapes;
    if (mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM ||
        mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM ||
        mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) {
        param_shapes.push_back(inputShapes[3]);
    }

    if (WRBType == InputLayerType::PARAMETER) {
        param_shapes.push_back(inputShapes[4]);
        param_shapes.push_back(inputShapes[5]);
        param_shapes.push_back(inputShapes[6]);
    }
    init_input_shapes(ov::test::static_shapes_to_test_representation(param_shapes));

    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0]),
                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]),
                               std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[2])};

    std::shared_ptr<ov::Node> seq_lengths_node;
    if (mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM ||
        mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM ||
        mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) {
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[3]);
        seq_lengths_node = param;
        seq_lengths_node->set_friendly_name("seq_lengths");
        params.push_back(param);
    } else if (mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST ||
               mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) {
        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, inputShapes[3], seq_lengths);
        seq_lengths_node = std::make_shared<ov::op::v0::Constant>(tensor);
    } else {
        std::vector<int64_t> lengths(inputShapes[3][0], seq_lengths);
        seq_lengths_node = ov::op::v0::Constant::create(ov::element::i64, inputShapes[3], lengths);
    }

    std::shared_ptr<ov::Node> W, R, B;
    if (WRBType == InputLayerType::PARAMETER) {
        auto param_num = inputDynamicShapes.size();
        const auto W_param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[param_num - 3]);
        const auto R_param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[param_num - 2]);
        const auto B_param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[param_num - 1]);
        W = W_param;
        R = R_param;
        B = B_param;
        params.push_back(W_param);
        params.push_back(R_param);
        params.push_back(B_param);
    } else {
        auto tensor_w = ov::test::utils::create_and_fill_tensor(model_type, W_shape);
        W = std::make_shared<ov::op::v0::Constant>(tensor_w);

        auto tensor_r = ov::test::utils::create_and_fill_tensor(model_type, R_shape);
        R = std::make_shared<ov::op::v0::Constant>(tensor_r);

        auto tensor_b = ov::test::utils::create_and_fill_tensor(model_type, B_shape);
        B = std::make_shared<ov::op::v0::Constant>(tensor_b);
    }

    auto lstm_sequence = std::make_shared<ov::op::v5::LSTMSequence>(params[0], params[1], params[2], seq_lengths_node, W, R, B, hidden_size, direction,
                                                                    std::vector<float>{}, std::vector<float>{}, activations, clip);

    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(lstm_sequence->output(0)),
                             std::make_shared<ov::op::v0::Result>(lstm_sequence->output(1)),
                             std::make_shared<ov::op::v0::Result>(lstm_sequence->output(2))};

    function = std::make_shared<ov::Model>(results, params, "lstm_sequence");
    bool is_pure_sequence = mode == SequenceTestsMode::PURE_SEQ ||
                            mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM ||
                            mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST;

    if (!is_pure_sequence) {
        ov::pass::Manager manager;
        if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL)
            manager.register_pass<ov::pass::BidirectionalLSTMSequenceDecomposition>();
        manager.register_pass<ov::pass::ConvertLSTMSequenceToTensorIterator>();
        manager.run_passes(function);
        bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function);
        EXPECT_EQ(ti_found, true);
    } else {
        bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function);
        EXPECT_EQ(ti_found, false);
    }
}
} // namespace test
} // namespace ov