Refactor MatMulTest, MaxMinLayerTest, Mvn1LayerTest (#20292)

* Refactor MatMulTest

* Refactor MaxMinLayerTest

* Refactor Mvn1LayerTest
This commit is contained in:
Oleg Pipikin 2023-10-10 00:35:32 +02:00 committed by Alexander Nesterov
parent b31d79a2c0
commit 3c01e71a76
12 changed files with 687 additions and 133 deletions

View File

@ -4,60 +4,100 @@
#include <vector>
#include "single_layer_tests/mat_mul.hpp"
using namespace LayerTestsDefinitions;
#include "single_op_tests/mat_mul.hpp"
namespace {
using ov::test::MatMulLayerTest;
using ov::test::utils::InputLayerType;
const std::vector<InferenceEngine::Precision> inputPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::I32,
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
ov::element::i32,
};
const std::vector<ShapeRelatedParams> shapeRelatedParams = {
{ { {1, 4, 5, 6}, false }, { {1, 4, 6, 4}, false } },
{ { {4, 5, 6}, false }, { {6, 3}, false } },
{ { {9, 9, 9}, false }, { {9, 9}, false } },
{ { {1, 2, 3}, false }, { {1, 10, 3}, true } },
{ { {1, 2, 3}, false }, { {1, 3, 10}, false } },
{ { {1, 2, 3}, false }, { {1, 1, 3, 2}, false } },
{ { {1, 3, 2, 4}, false }, { {2, 1, 4, 2}, false } },
{ { {2, 1, 2, 4}, false }, { {1, 3, 4, 2}, false } },
{ { {3, 2, 4}, false }, { {2, 1, 4, 2}, false } },
{ { {2, 1, 4, 2}, false }, { {3, 2, 4}, false } },
{ { {2, 1, 2, 3}, true }, { {3, 2, 4}, false } },
{ { {2, 1, 3, 2}, false }, { {3, 4, 2}, true } },
{ { {2, 1, 2, 3}, true }, { {3, 4, 2}, true } },
{ { {3}, false }, { {2, 2, 3, 1}, false } },
{ { {2, 2, 1, 3}, false }, { {3}, false } },
{ { {1, 5}, false }, { {5, 1}, false } },
{ { {5, 1}, true }, { {5, 1}, false } },
{ { {1, 5}, false }, { {10, 5}, true } },
{ { {1, 5}, false }, { {5}, false } },
{ { {5}, false }, { {5, 1}, false } },
{ { {5}, false }, { {5}, false } },
{ { {5}, true }, { {5}, true } }
std::vector<std::vector<ov::Shape>> input_shapes_no_transpose_static {
{ {1, 4, 5, 6}, {1, 4, 6, 4} },
{ {4, 5, 6}, {6, 3} },
{ {9, 9, 9}, {9, 9} },
{ {1, 2, 3}, {1, 3, 10} },
{ {1, 2, 3}, {1, 1, 3, 2} },
{ {1, 3, 2, 4}, {2, 1, 4, 2} },
{ {2, 1, 2, 4}, {1, 3, 4, 2} },
{ {3, 2, 4}, {2, 1, 4, 2} },
{ {2, 1, 4, 2}, {3, 2, 4} },
{ {3}, {2, 2, 3, 1} },
{ {2, 2, 1, 3}, {3} },
{ {1, 5}, {5, 1} },
{ {1, 5}, {5} },
{ {5}, {5, 1} },
{ {5}, {5} },
};
std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER,
std::vector<std::vector<ov::Shape>> input_shapes_first_transpose_static {
{ {2, 1, 2, 3}, {3, 2, 4} },
{ {5, 1}, {5, 1} },
};
std::vector<std::vector<ov::Shape>> input_shapes_second_transpose_static {
{ {1, 2, 3}, {1, 10, 3} },
{ {2, 1, 3, 2}, {3, 4, 2} },
{ {1, 5}, {10, 5} },
};
std::vector<std::vector<ov::Shape>> input_shapes_both_transpose_static {
{ {2, 1, 2, 3}, {3, 4, 2} },
{ {5}, {5}, },
};
std::vector<InputLayerType> secondary_input_types = {
InputLayerType::CONSTANT,
InputLayerType::PARAMETER,
};
std::map<std::string, std::string> additional_config = {};
INSTANTIATE_TEST_SUITE_P(smoke_MatMul, MatMulTest,
INSTANTIATE_TEST_SUITE_P(smoke_MatMul_NoTranspose, MatMulLayerTest,
::testing::Combine(
::testing::ValuesIn(shapeRelatedParams),
::testing::ValuesIn(inputPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(secondaryInputTypes),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_no_transpose_static)),
::testing::Values(std::make_pair(false, false)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(secondary_input_types),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::Values(additional_config)),
MatMulTest::getTestCaseName);
MatMulLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_MatMul_FirstTranspose, MatMulLayerTest,
::testing::Combine(
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_first_transpose_static)),
::testing::Values(std::make_pair(true, false)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(secondary_input_types),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::Values(additional_config)),
MatMulLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_MatMul_SecondTranspose, MatMulLayerTest,
::testing::Combine(
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_second_transpose_static)),
::testing::Values(std::make_pair(false, true)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(secondary_input_types),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::Values(additional_config)),
MatMulLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_MatMul_BothTranspose, MatMulLayerTest,
::testing::Combine(
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_both_transpose_static)),
::testing::Values(std::make_pair(true, true)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(secondary_input_types),
::testing::Values(ov::test::utils::DEVICE_CPU),
::testing::Values(additional_config)),
MatMulLayerTest::getTestCaseName);
} // namespace

View File

@ -2,15 +2,15 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/minimum_maximum.hpp"
#include "single_op_tests/minimum_maximum.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
using ov::test::MaxMinLayerTest;
using ov::test::utils::InputLayerType;
using ov::test::utils::MinMaxOpType;
const std::vector<std::vector<std::vector<size_t>>> inShapes = {
const std::vector<std::vector<ov::Shape>> input_shapes_static = {
{{2}, {1}},
{{1, 1, 1, 3}, {1}},
{{1, 2, 4}, {1}},
@ -20,31 +20,27 @@ const std::vector<std::vector<std::vector<size_t>>> inShapes = {
{{8, 1, 6, 1}, {7, 1, 5}},
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16,
const std::vector<ov::element::Type> model_types = {
ov::element::f32,
ov::element::f16,
};
const std::vector<ngraph::helpers::MinMaxOpType> opType = {
ngraph::helpers::MinMaxOpType::MINIMUM,
ngraph::helpers::MinMaxOpType::MAXIMUM,
const std::vector<MinMaxOpType> op_types = {
MinMaxOpType::MINIMUM,
MinMaxOpType::MAXIMUM,
};
const std::vector<ngraph::helpers::InputLayerType> inputType = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER,
const std::vector<InputLayerType> second_input_types = {
InputLayerType::CONSTANT,
InputLayerType::PARAMETER,
};
INSTANTIATE_TEST_SUITE_P(smoke_maximum, MaxMinLayerTest,
::testing::Combine(
::testing::ValuesIn(inShapes),
::testing::ValuesIn(opType),
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::ValuesIn(inputType),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)),
::testing::ValuesIn(op_types),
::testing::ValuesIn(model_types),
::testing::ValuesIn(second_input_types),
::testing::Values(ov::test::utils::DEVICE_CPU)),
MaxMinLayerTest::getTestCaseName);

View File

@ -4,38 +4,40 @@
#include <vector>
#include "single_layer_tests/mvn.hpp"
#include "single_op_tests/mvn.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
using ov::test::Mvn1LayerTest;
using ov::test::Mvn6LayerTest;
const std::vector<bool> emptyAcrossChannels = {{}};
const std::vector<ngraph::AxisSet> emptyReductionAxes = {{}};
const std::vector<bool> empty_across_channels = {{}};
const std::vector<ngraph::AxisSet> empty_reduction_axes = {{}};
const std::vector<std::vector<size_t>> inputShapes = {
{8},
{1, 16},
{3, 19},
{1, 32, 17},
{1, 37, 9},
{1, 16, 5, 8},
{2, 19, 5, 10},
{7, 32, 2, 8},
{5, 8, 3, 5},
{4, 41, 6, 9},
{1, 32, 8, 1, 6},
{1, 9, 1, 15, 9},
{6, 64, 6, 1, 18},
{2, 31, 2, 9, 1},
{10, 16, 5, 10, 6}
const std::vector<std::vector<ov::Shape>> input_shapes_static = {
{{8}},
{{1, 16}},
{{3, 19}},
{{1, 32, 17}},
{{1, 37, 9}},
{{1, 16, 5, 8}},
{{2, 19, 5, 10}},
{{7, 32, 2, 8}},
{{5, 8, 3, 5}},
{{4, 41, 6, 9}},
{{1, 32, 8, 1, 6}},
{{1, 9, 1, 15, 9}},
{{6, 64, 6, 1, 18}},
{{2, 31, 2, 9, 1}},
{{10, 16, 5, 10, 6}}
};
const std::vector<bool> acrossChannels = {
const std::vector<bool> across_channels = {
true,
false
};
const std::vector<bool> normalizeVariance = {
const std::vector<bool> normalize_variance = {
true,
false
};
@ -44,39 +46,44 @@ const std::vector<double> epsilon = {
0.000000001
};
std::vector<InferenceEngine::Precision> dataPrecisions = {
InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32
std::vector<ov::element::Type> model_types = {
ov::element::f16,
ov::element::f32
};
const auto MvnAcrossChannels = ::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(emptyReductionAxes),
::testing::ValuesIn(acrossChannels),
::testing::ValuesIn(normalizeVariance),
const auto Mvnacross_channels = ::testing::Combine(
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(empty_reduction_axes),
::testing::ValuesIn(across_channels),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilon),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
const std::vector<std::vector<ov::Shape>> input_shapes_reduction_axes_static = {
{{1, 10, 5, 17}},
{{1, 3, 8, 9}}
};
const auto MvnReductionAxes = ::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 10, 5, 17}, {1, 3, 8, 9}}),
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_reduction_axes_static)),
::testing::Values(ov::element::f32),
::testing::ValuesIn(std::vector<ngraph::AxisSet>{{1, 2, 3}, {2, 3}}),
::testing::ValuesIn(emptyAcrossChannels),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(empty_across_channels),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilon),
::testing::Values(ov::test::utils::DEVICE_CPU)
);
INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_AcrossChannels, Mvn1LayerTest, MvnAcrossChannels, Mvn1LayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_across_channels, Mvn1LayerTest, Mvnacross_channels, Mvn1LayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_TestsMVN_ReductionAxes, Mvn1LayerTest, MvnReductionAxes, Mvn1LayerTest::getTestCaseName);
std::vector<InferenceEngine::Precision> idxPrecisions = {
InferenceEngine::Precision::I32,
InferenceEngine::Precision::I64
std::vector<ov::element::Type> idx_types = {
ov::element::i32,
ov::element::i64
};
const std::vector<std::string> epsMode = {
@ -88,98 +95,139 @@ const std::vector<float> epsilonF = {
0.0001f
};
const std::vector<std::vector<ov::Shape>> input_shapes_5d_static = {
{{1, 10, 5, 7, 8}},
{{1, 3, 8, 9, 49}}
};
INSTANTIATE_TEST_SUITE_P(smoke_MVN_5D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 10, 5, 7, 8}, {1, 3, 8, 9, 49}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_5d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3, 4}, {2, 3, 4}, {-3, -2, -1}, {-1, -4, -2, -3}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_4d_static = {
{{1, 10, 5, 17}},
{{1, 3, 8, 9}}
};
INSTANTIATE_TEST_SUITE_P(smoke_MVN_4D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 10, 5, 17}, {1, 3, 8, 9}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_4d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2, 3}, {2, 3}, {-2, -1}, {-2, -1, -3}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_3d_static = {
{{1, 32, 17}},
{{1, 37, 9}}
};
INSTANTIATE_TEST_SUITE_P(smoke_MVN_3D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 32, 17}, {1, 37, 9}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_3d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{1, 2}, {2}, {-1}, {-1, -2}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_2d_static = {
{{3, 5}},
{{2, 55}}
};
INSTANTIATE_TEST_SUITE_P(smoke_MVN_2D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{3, 5}, {2, 55}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{1}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_1d_static = {
{{3}},
{{9}},
{{55}}
};
INSTANTIATE_TEST_SUITE_P(smoke_MVN_1D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{3}, {9}, {55}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_1d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{0}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_decomposition_3d_static = {
{{1, 32, 17}},
{{1, 37, 9}}
};
INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_3D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 32, 17}, {1, 37, 9}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_3d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{0, 1, 2}, {0}, {1}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_decomposition_4d_static = {
{{1, 16, 5, 8}},
{{2, 19, 5, 10}}
};
INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_4D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 16, 5, 8}, {2, 19, 5, 10}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_4d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{0, 1, 2, 3}, {0, 1, 2}, {0, 3}, {0}, {1}, {2}, {3}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
const std::vector<std::vector<ov::Shape>> input_shapes_decomposition_10d_static = {
{{1, 3, 5, 4, 2, 6, 5, 3, 2, 1}},
};
INSTANTIATE_TEST_SUITE_P(smoke_Decomposition_10D, Mvn6LayerTest,
::testing::Combine(
::testing::ValuesIn(std::vector<std::vector<size_t>>{{1, 3, 5, 4, 2, 6, 5, 3, 2, 1}}),
::testing::ValuesIn(dataPrecisions),
::testing::ValuesIn(idxPrecisions),
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_decomposition_10d_static)),
::testing::ValuesIn(model_types),
::testing::ValuesIn(idx_types),
::testing::ValuesIn(std::vector<std::vector<int>>{{0, 1, 5, 8, 9}, {0, 1, 2, 3}, {0, 1, 2}, {0, 3}, {0}, {3}, {5}, {9}}),
::testing::ValuesIn(normalizeVariance),
::testing::ValuesIn(normalize_variance),
::testing::ValuesIn(epsilonF),
::testing::ValuesIn(epsMode),
::testing::Values(ov::test::utils::DEVICE_CPU)),
Mvn6LayerTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_op/mat_mul.hpp"
namespace ov {
namespace test {
// Runs inference for the MatMul test case described by the current parameter set
// (model is built in MatMulLayerTest::SetUp).
// Fix: dropped the stray semicolon after the body — TEST_P expands to a function
// definition, so the trailing ';' is an empty declaration (-Wextra-semi).
TEST_P(MatMulLayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_op/minimum_maximum.hpp"
namespace ov {
namespace test {
// Runs inference for the Minimum/Maximum test case described by the current
// parameter set (model is built in MaxMinLayerTest::SetUp).
// Fix: added the conventional space before '{' and dropped the stray semicolon
// after the body (TEST_P expands to a function definition; -Wextra-semi).
TEST_P(MaxMinLayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,19 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_op/mvn.hpp"
namespace ov {
namespace test {
// Runs inference for the opset1 MVN test case (model built in Mvn1LayerTest::SetUp).
// Fix: dropped the stray semicolons after both bodies — TEST_P expands to a
// function definition, so the trailing ';' is an empty declaration (-Wextra-semi).
TEST_P(Mvn1LayerTest, Inference) {
    run();
}

// Runs inference for the opset6 MVN test case (model built in Mvn6LayerTest::SetUp).
TEST_P(Mvn6LayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <tuple>
#include <vector>
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"
namespace ov {
namespace test {
// Parameter set describing one MatMul layer test case.
using MatMulLayerTestParamsSet = std::tuple<
    std::vector<InputShape>,            // Input Shapes
    std::pair<bool, bool>,              // Transpose inputs
    ov::element::Type,                  // Model type
    ov::test::utils::InputLayerType,    // Secondary input type
    std::string,                        // Device name
    std::map<std::string, std::string>  // Additional network configuration
>;

// Parameterized single-op test for MatMul.
class MatMulLayerTest : public testing::WithParamInterface<MatMulLayerTestParamsSet>,
                        virtual public ov::test::SubgraphBaseTest {
public:
    // Builds a human-readable test name from the parameter set.
    static std::string getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj);
protected:
    // Constructs the MatMul model according to the current parameters.
    void SetUp() override;
};
} // namespace test
} // namespace ov

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "common_test_utils/test_enums.hpp"
namespace ov {
namespace test {
// Parameter set describing one Minimum/Maximum layer test case.
// Fix: removed the redundant `typename` — the alias target `std::tuple<...>` is a
// non-dependent type, so the disambiguator is noise here.
using MaxMinParamsTuple = std::tuple<
    std::vector<InputShape>,         // Input shapes
    ov::test::utils::MinMaxOpType,   // Operation type
    ov::element::Type,               // Model type
    ov::test::utils::InputLayerType, // Secondary input type
    std::string>;                    // Device name

// Parameterized single-op test for Minimum/Maximum.
class MaxMinLayerTest:
        public testing::WithParamInterface<MaxMinParamsTuple>,
        virtual public ov::test::SubgraphBaseTest {
public:
    // Builds a human-readable test name from the parameter set.
    static std::string getTestCaseName(const testing::TestParamInfo<MaxMinParamsTuple>& obj);
protected:
    // Constructs the Minimum or Maximum model according to the current parameters.
    void SetUp() override;
};
} // namespace test
} // namespace ov

View File

@ -0,0 +1,54 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include "shared_test_classes/base/ov_subgraph.hpp"
namespace ov {
namespace test {
// Parameter set describing one opset1 MVN layer test case.
using mvn1Params = std::tuple<
    std::vector<InputShape>, // Input shapes
    ov::element::Type,       // Model type
    ov::AxisSet,             // Reduction axes
    bool,                    // Across channels
    bool,                    // Normalize variance
    double,                  // Epsilon
    std::string              // Device name
>;

// Parameterized single-op test for opset1 MVN.
class Mvn1LayerTest : public testing::WithParamInterface<mvn1Params>,
                      virtual public ov::test::SubgraphBaseTest {
public:
    // Builds a human-readable test name from the parameter set.
    static std::string getTestCaseName(const testing::TestParamInfo<mvn1Params>& obj);
protected:
    // Constructs the opset1 MVN model according to the current parameters.
    void SetUp() override;
};

// Parameter set describing one opset6 MVN layer test case.
using mvn6Params = std::tuple<
    std::vector<InputShape>, // Input shapes
    ov::element::Type,       // Model type
    ov::element::Type,       // Axes type
    std::vector<int>,        // Axes
    bool,                    // Normalize variance
    float,                   // Epsilon
    std::string,             // Epsilon mode
    std::string              // Device name
>;

// Parameterized single-op test for opset6 MVN.
class Mvn6LayerTest : public testing::WithParamInterface<mvn6Params>,
                      virtual public ov::test::SubgraphBaseTest {
public:
    // Builds a human-readable test name from the parameter set.
    static std::string getTestCaseName(const testing::TestParamInfo<mvn6Params>& obj);
protected:
    // Constructs the opset6 MVN model according to the current parameters.
    void SetUp() override;
};
} // namespace test
} // namespace ov

View File

@ -0,0 +1,81 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_op/mat_mul.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/matmul.hpp"
namespace ov {
namespace test {
using ov::test::utils::InputLayerType;
// Builds a human-readable test name: dynamic input shapes, per-iteration static
// shapes, transpose flags, secondary input kind, element type, device and config.
std::string MatMulLayerTest::getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj) {
    std::vector<InputShape> input_shapes;
    std::pair<bool, bool> transpose_ab;
    ov::element::Type element_type;
    InputLayerType second_input;
    std::string device;
    std::map<std::string, std::string> config;
    std::tie(input_shapes, transpose_ab, element_type, second_input, device, config) = obj.param;

    const size_t num_inputs = input_shapes.size();
    std::ostringstream name;
    name << "IS=(";
    for (size_t i = 0lu; i < num_inputs; i++) {
        name << ov::test::utils::partialShape2str({input_shapes[i].first});
        if (i + 1lu < num_inputs)
            name << "_";
    }
    name << ")_TS=";
    for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
        name << "{";
        for (size_t j = 0lu; j < num_inputs; j++) {
            name << ov::test::utils::vec2str(input_shapes[j].second[i]);
            if (j + 1lu < num_inputs)
                name << "_";
        }
        name << "}_";
    }
    name << "transpose_a=" << transpose_ab.first << "_";
    name << "transpose_b=" << transpose_ab.second << "_";
    name << "secondary_input_type=" << second_input << "_";
    name << "modelType=" << element_type.get_type_name() << "_";
    name << "trgDev=" << device;
    name << "config=(";
    for (const auto& entry : config) {
        name << entry.first << ", " << entry.second << ";";
    }
    name << ")";
    return name.str();
}
void MatMulLayerTest::SetUp() {
std::vector<InputShape> shapes;
std::pair<bool, bool> transpose;
ov::element::Type model_type;
InputLayerType secondary_input_type;
std::map<std::string, std::string> additional_config;
std::tie(shapes, transpose, model_type, secondary_input_type, targetDevice, additional_config) = this->GetParam();
init_input_shapes(shapes);
configuration.insert(additional_config.begin(), additional_config.end());
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
ov::NodeVector inputs {params[0]};
if (InputLayerType::PARAMETER == secondary_input_type) {
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]);
params.push_back(param);
inputs.push_back(param);
} else {
auto tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes[0][1]);
auto constant = std::make_shared<ov::op::v0::Constant>(tensor);
inputs.push_back(constant);
}
auto mat_mul = std::make_shared<ov::op::v0::MatMul>(inputs[0], inputs[1], transpose.first, transpose.second);
auto result = std::make_shared<ov::op::v0::Result>(mat_mul);
function = std::make_shared<ov::Model>(result, params, "MatMul");
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,85 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_op/minimum_maximum.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/minimum.hpp"
#include "openvino/op/maximum.hpp"
namespace ov {
namespace test {
using ov::test::utils::InputLayerType;
using ov::test::utils::MinMaxOpType;
// Builds a human-readable test name: dynamic input shapes, per-iteration static
// shapes, op kind (Min/Max), secondary input kind, element type and device.
std::string MaxMinLayerTest::getTestCaseName(const testing::TestParamInfo<MaxMinParamsTuple> &obj) {
    std::vector<InputShape> input_shapes;
    ov::element::Type element_type;
    std::string device;
    InputLayerType secondary_input;
    MinMaxOpType operation;
    std::tie(input_shapes, operation, element_type, secondary_input, device) = obj.param;

    const size_t num_inputs = input_shapes.size();
    std::ostringstream name;
    name << "IS=(";
    for (size_t i = 0lu; i < num_inputs; i++) {
        name << ov::test::utils::partialShape2str({input_shapes[i].first});
        if (i + 1lu < num_inputs)
            name << "_";
    }
    name << ")_TS=";
    for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
        name << "{";
        for (size_t j = 0lu; j < num_inputs; j++) {
            name << ov::test::utils::vec2str(input_shapes[j].second[i]);
            if (j + 1lu < num_inputs)
                name << "_";
        }
        name << "}_";
    }
    name << "OpType=" << operation << "_";
    name << "SecondaryInputType=" << secondary_input << "_";
    name << "netPRC=" << element_type.get_type_name() << "_";
    name << "trgDev=" << device << "_";
    return name.str();
}
void MaxMinLayerTest::SetUp() {
std::vector<InputShape> shapes;
ov::element::Type model_type;
InputLayerType second_input_type;
MinMaxOpType op_type;
std::tie(shapes, op_type, model_type, second_input_type, targetDevice) = this->GetParam();
init_input_shapes(shapes);
ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
ov::NodeVector inputs {params[0]};
if (InputLayerType::PARAMETER == second_input_type) {
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[1]);
params.push_back(param);
inputs.push_back(param);
} else {
auto tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes[0][1]);
auto constant = std::make_shared<ov::op::v0::Constant>(tensor);
inputs.push_back(constant);
}
std::shared_ptr<ov::Node> min_max_op;
switch (op_type) {
case MinMaxOpType::MINIMUM:
min_max_op = std::make_shared<ov::op::v1::Minimum>(inputs[0], inputs[1]);
break;
case MinMaxOpType::MAXIMUM:
min_max_op = std::make_shared<ov::op::v1::Maximum>(inputs[0], inputs[1]);
break;
default:
throw std::logic_error("Unsupported operation type");
}
auto result = std::make_shared<ov::op::v0::Result>(min_max_op);
function = std::make_shared<ov::Model>(result, params, "MinMax");
}
} // namespace test
} // namespace ov

View File

@ -0,0 +1,135 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_op/mvn.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/mvn.hpp"
namespace ov {
namespace test {
// Builds a human-readable test name. When explicit reduction axes are given they
// are printed; otherwise the across-channels flag identifies the MVN-1 mode.
std::string Mvn1LayerTest::getTestCaseName(const testing::TestParamInfo<mvn1Params>& obj) {
    std::vector<InputShape> input_shapes;
    ov::element::Type element_type;
    ov::AxisSet reduction_axes;
    bool across_channels, normalize_variance;
    double eps;
    std::string device;
    std::tie(input_shapes, element_type, reduction_axes, across_channels, normalize_variance, eps, device) = obj.param;

    const size_t num_inputs = input_shapes.size();
    std::ostringstream name;
    name << "IS=(";
    for (size_t i = 0lu; i < num_inputs; i++) {
        name << ov::test::utils::partialShape2str({input_shapes[i].first});
        if (i + 1lu < num_inputs)
            name << "_";
    }
    name << ")_TS=";
    for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
        name << "{";
        for (size_t j = 0lu; j < num_inputs; j++) {
            name << ov::test::utils::vec2str(input_shapes[j].second[i]);
            if (j + 1lu < num_inputs)
                name << "_";
        }
        name << "}_";
    }
    name << "ModelType=" << element_type.get_type_name() << "_";
    if (reduction_axes.empty()) {
        name << "across_channels=" << (across_channels ? "TRUE" : "FALSE") << "_";
    } else {
        name << "ReductionAxes=" << ov::test::utils::vec2str(reduction_axes.to_vector()) << "_";
    }
    name << "normalize_variance=" << (normalize_variance ? "TRUE" : "FALSE") << "_";
    name << "Epsilon=" << eps << "_";
    name << "TargetDevice=" << device;
    return name.str();
}
// Builds the opset1 MVN model for the current parameter set.
// Two modes: explicit reduction axes, or legacy across-channels mode where the
// axes are derived from the rank and then forced onto the node (see below).
void Mvn1LayerTest::SetUp() {
std::vector<InputShape> shapes;
ov::element::Type model_type;
ov::AxisSet axes;
bool across_channels, normalize_variance;
double eps;
std::tie(shapes, model_type, axes, across_channels, normalize_variance, eps, targetDevice) = this->GetParam();
init_input_shapes(shapes);
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
std::shared_ptr<ov::op::v0::MVN> mvn;
if (axes.empty()) {
// Legacy mode: no explicit axes were supplied, construct via the
// across_channels flag.
mvn = std::make_shared<ov::op::v0::MVN>(param, across_channels, normalize_variance, eps);
// OpenVINO MVN implementation implicitly adds 0th dimension to reduction axes set which is not valid behavior
// The inner `axes` deliberately shadows the (empty) outer one: we rebuild the
// reduction set from spatial dims ([1..rank) across channels, [2..rank)
// otherwise) and overwrite what the constructor computed.
ov::AxisSet axes;
const size_t startAxis = across_channels ? 1 : 2;
const size_t numOfDims = param->output(0).get_partial_shape().size();
for (size_t i = startAxis; i < numOfDims; i++)
axes.insert(i);
mvn->set_reduction_axes(axes);
} else {
// Explicit-axes mode: pass the reduction axes straight through.
mvn = std::make_shared<ov::op::v0::MVN>(param, axes, normalize_variance, eps);
}
auto result = std::make_shared<ov::op::v0::Result>(mvn);
function = std::make_shared<ov::Model>(result, ov::ParameterVector{param}, "MVN1");
}
// Builds a human-readable test name: shapes, element/axes types, axes values,
// normalize-variance flag, epsilon, epsilon mode and device.
std::string Mvn6LayerTest::getTestCaseName(const testing::TestParamInfo<mvn6Params>& obj) {
    std::vector<InputShape> input_shapes;
    ov::element::Type element_type;
    ov::element::Type axes_type;
    std::vector<int> axes_values;
    bool normalize_variance;
    float eps;
    std::string eps_mode;
    std::string device;
    std::tie(input_shapes, element_type, axes_type, axes_values, normalize_variance, eps, eps_mode, device) = obj.param;

    const size_t num_inputs = input_shapes.size();
    std::ostringstream name;
    name << "IS=(";
    for (size_t i = 0lu; i < num_inputs; i++) {
        name << ov::test::utils::partialShape2str({input_shapes[i].first});
        if (i + 1lu < num_inputs)
            name << "_";
    }
    name << ")_TS=";
    for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
        name << "{";
        for (size_t j = 0lu; j < num_inputs; j++) {
            name << ov::test::utils::vec2str(input_shapes[j].second[i]);
            if (j + 1lu < num_inputs)
                name << "_";
        }
        name << "}_";
    }
    name << "ModelType=" << element_type.get_type_name() << "_";
    name << "AxType=" << axes_type.get_type_name() << "_";
    name << "Ax=" << ov::test::utils::vec2str(axes_values) << "_";
    name << "NormVariance=" << (normalize_variance ? "TRUE" : "FALSE") << "_";
    name << "Eps=" << eps << "_";
    name << "EM=" << eps_mode << "_";
    name << "TargetDevice=" << device;
    return name.str();
}
void Mvn6LayerTest::SetUp() {
std::vector<InputShape> shapes;
ov::element::Type model_type;
ov::element::Type axis_type;
std::vector<int> axes;
bool normalize_variance;
float eps;
std::string eps_mode;
std::tie(shapes, model_type, axis_type, axes, normalize_variance, eps, eps_mode, targetDevice) = this->GetParam();
init_input_shapes(shapes);
auto param = std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front());
auto axes_node = ov::op::v0::Constant::create(axis_type, ov::Shape{axes.size()}, axes);
ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT;
if (eps_mode == "outside_sqrt")
nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT;
auto mvn = std::make_shared<ov::op::v6::MVN>(param, axes_node, normalize_variance, eps, nEpsMode);
auto result = std::make_shared<ov::op::v0::Result>(mvn);
function = std::make_shared<ov::Model>(result, ov::ParameterVector{param}, "MVN6");
}
} // namespace test
} // namespace ov