[GNA] Added mode for Split and concat tests (#7773)

* added gna_mode to existing tests

* removed from tests_deprecated

* Commit for CI restart
Andrey Noskov 2021-11-10 14:45:14 +03:00 committed by GitHub
parent 123efe2ecc
commit f5767df023
7 changed files with 41 additions and 90 deletions

View File

@@ -15,8 +15,10 @@ namespace {
InferenceEngine::Precision::FP32
};
std::map<std::string, std::string> config = {
{"GNA_COMPACT_MODE", "NO"}
std::vector<std::map<std::string, std::string>> additionalConfig = {
{ {"GNA_COMPACT_MODE", "NO"} },
{ {"GNA_COMPACT_MODE", "NO"},
{"GNA_DEVICE_MODE", "GNA_SW_FP32"}}
};
std::vector<size_t> inputSizes = {
@@ -37,6 +39,6 @@ namespace {
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(inputSizes),
::testing::ValuesIn(hiddenSizes),
::testing::Values(config)),
::testing::ValuesIn(additionalConfig)),
ConcatQuantDuringMemoryRequantTest::getTestCaseName);
} // namespace
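
The pattern in this change is worth calling out: the single std::map config is replaced by a std::vector of maps, and ::testing::Values(config) becomes ::testing::ValuesIn(additionalConfig), so every existing parameter combination is now instantiated once per configuration (the default GNA_SW_EXACT mode and the newly added GNA_SW_FP32 mode). Below is a minimal, self-contained sketch of that GoogleTest mechanism; the fixture and suite names are illustrative only and not part of the patch.

#include <gtest/gtest.h>
#include <map>
#include <string>
#include <vector>

// Hypothetical fixture parameterized directly by a plugin config map.
class ConfigSweepTest : public ::testing::TestWithParam<std::map<std::string, std::string>> {};

TEST_P(ConfigSweepTest, RunsOncePerConfig) {
    const auto& config = GetParam();
    // A real test would forward `config` to the plugin; here we only check it is populated.
    EXPECT_FALSE(config.empty());
}

// Two entries in the vector -> every TEST_P above runs twice, once per config.
static const std::vector<std::map<std::string, std::string>> kConfigs = {
    { {"GNA_COMPACT_MODE", "NO"} },
    { {"GNA_COMPACT_MODE", "NO"}, {"GNA_DEVICE_MODE", "GNA_SW_FP32"} },
};

INSTANTIATE_TEST_SUITE_P(smoke_ConfigSweep, ConfigSweepTest, ::testing::ValuesIn(kConfigs));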

View File

@@ -22,9 +22,15 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP16,
};
std::map<std::string, std::string> additional_config = {
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_COMPACT_MODE", "NO"},
std::vector<std::map<std::string, std::string>> additional_config = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_COMPACT_MODE", "NO"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_FP32"},
{"GNA_COMPACT_MODE", "NO"}
}
};
INSTANTIATE_TEST_SUITE_P(smoke_concat_first_input, ConcatFirstInputTest,
@@ -32,7 +38,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_concat_first_input, ConcatFirstInputTest,
::testing::ValuesIn(inShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::Values(additional_config)),
::testing::ValuesIn(additional_config)),
ConcatFirstInputTest::getTestCaseName);
} //namespace

View File

@@ -27,10 +27,17 @@ std::vector<size_t> constant_sizes_unaligned = {
99
};
std::map<std::string, std::string> additional_config = {
{"GNA_COMPACT_MODE", "NO"},
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "3276.8"},
std::vector<std::map<std::string, std::string>> additional_config = {
{
{"GNA_COMPACT_MODE", "NO"},
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "3276.8"}
},
{
{"GNA_COMPACT_MODE", "NO"},
{"GNA_DEVICE_MODE", "GNA_SW_FP32"},
{"GNA_SCALE_FACTOR_0", "3276.8"}
},
};
} // namespace
@@ -40,7 +47,7 @@ INSTANTIATE_TEST_SUITE_P(I_aligned_C_aligned, MultipleConcatTest,
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(input_sizes_aligned),
::testing::ValuesIn(constant_sizes_aligned),
::testing::Values(additional_config)),
::testing::ValuesIn(additional_config)),
MultipleConcatTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(I_aligned_C_unaligned, MultipleConcatTest,
@@ -49,7 +56,7 @@ INSTANTIATE_TEST_SUITE_P(I_aligned_C_unaligned, MultipleConcatTest,
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(input_sizes_aligned),
::testing::ValuesIn(constant_sizes_unaligned),
::testing::Values(additional_config)),
::testing::ValuesIn(additional_config)),
MultipleConcatTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(I_unaligned_C_aligned, MultipleConcatTest,
@@ -58,7 +65,7 @@ INSTANTIATE_TEST_SUITE_P(I_unaligned_C_aligned, MultipleConcatTest,
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(input_sizes_unaligned),
::testing::ValuesIn(constant_sizes_aligned),
::testing::Values(additional_config)),
::testing::ValuesIn(additional_config)),
MultipleConcatTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(I_unaligned_C_unaligned, MultipleConcatTest,
@@ -67,6 +74,6 @@ INSTANTIATE_TEST_SUITE_P(I_unaligned_C_unaligned, MultipleConcatTest,
::testing::Values(InferenceEngine::Precision::FP32),
::testing::ValuesIn(input_sizes_unaligned),
::testing::ValuesIn(constant_sizes_unaligned),
::testing::Values(additional_config)),
::testing::ValuesIn(additional_config)),
MultipleConcatTest::getTestCaseName);
} // namespace SubgraphTestsDefinitions
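
For context, the configuration map chosen by ::testing::ValuesIn is ultimately what the shared-test infrastructure passes to the plugin when the network is compiled, which is how GNA_DEVICE_MODE=GNA_SW_FP32 actually takes effect. A rough sketch of that hand-off using the Inference Engine API of this period; the function and variable names are assumptions for illustration, not code from this patch.

#include <inference_engine.hpp>
#include <map>
#include <string>

// Sketch: compile a network on the GNA device with the per-test additional config.
InferenceEngine::ExecutableNetwork compileWithConfig(
        InferenceEngine::Core& core,
        InferenceEngine::CNNNetwork& network,
        const std::map<std::string, std::string>& additionalConfig) {
    // additionalConfig carries GNA_DEVICE_MODE, GNA_COMPACT_MODE, GNA_SCALE_FACTOR_0, ...
    return core.LoadNetwork(network, "GNA", additionalConfig);
}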

View File

@@ -10,13 +10,17 @@ namespace SubgraphTestsDefinitions {
std::string targetName;
size_t inputSize;
size_t hiddenSize;
std::tie(netPrecision, targetName, inputSize, hiddenSize, std::ignore) = obj.param;
std::map<std::string, std::string> config;
std::tie(netPrecision, targetName, inputSize, hiddenSize, config) = obj.param;
std::ostringstream results;
results << "netPRC=" << netPrecision.name() << "_";
results << "IS=" << inputSize << "_";
results << "HS=" << hiddenSize << "_";
results << "targetDevice=" << targetName;
for (auto const& configItem : config) {
results << "_configItem=" << configItem.second;
}
return results.str();
}
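
With the config now part of the parameter tuple, getTestCaseName appends one "_configItem=<value>" suffix per config entry, so the two device modes produce distinct, non-colliding test names. A small standalone illustration of the naming logic above; the prefix values are examples only.

#include <iostream>
#include <map>
#include <sstream>
#include <string>

int main() {
    std::map<std::string, std::string> config = {
        {"GNA_COMPACT_MODE", "NO"},
        {"GNA_DEVICE_MODE", "GNA_SW_FP32"}
    };
    std::ostringstream results;
    results << "netPRC=FP32_IS=8_HS=16_targetDevice=GNA";   // example prefix
    for (auto const& configItem : config) {
        results << "_configItem=" << configItem.second;     // value only, as in the patch
    }
    // std::map iterates in key order, so this prints:
    // netPRC=FP32_IS=8_HS=16_targetDevice=GNA_configItem=NO_configItem=GNA_SW_FP32
    std::cout << results.str() << std::endl;
    return 0;
}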

View File

@@ -18,7 +18,9 @@ std::string ConcatFirstInputTest::getTestCaseName(const testing::TestParamInfo<c
result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice;
for (auto const& configItem : additional_config) {
result << "_configItem=" << configItem.first << "_" << configItem.second;
}
return result.str();
}

View File

@@ -20,6 +20,9 @@ std::string MultipleConcatTest::getTestCaseName(const testing::TestParamInfo<mul
result << "IS=" << inputSize << "_";
result << "CS=" << constantSize << "_";
result << "targetDevice=" << targetDevice;
for (auto const& configItem : config) {
result << "_configItem=" << configItem.second;
}
return result.str();
}

View File

@@ -41,22 +41,6 @@ static std::string getTestName(testing::TestParamInfo<FP32TestParams> obj) {
return "channels_" + std::to_string(obj.param.nChannels) + "_" + (obj.param.eltwise_type == FP32TestParams::eSumm ? "summ" : "mull");
}
TEST_P(GNAFP32ParametricTest, SplitFollowedByEltwiseMulOnAllignedCPU) {
auto c = GetParam().nChannels;
auto isMull = GetParam().eltwise_type == FP32TestParams::eMul;
std::vector<float> input_data1(c, 3.0);
std::vector<float> input_data2(c, 2.0);
std::vector<float> input_data;
input_data.insert(input_data.end(), input_data1.begin(), input_data1.end());
input_data.insert(input_data.end(), input_data2.begin(), input_data2.end());
std::vector<float> expected_result(c, isMull ? 6.0 : 5.0);
assert_that().onInferModel(EltwiseAfterSplitModel(c, isMull))
.inNotCompactMode().gna().propagate_forward().onCPU()
.called_with().input("input_1", input_data).equals_to(expected_result);
}
FP32TestParams gna_fp32_test_params[] = {
{7, FP32TestParams::eMul},
{7, FP32TestParams::eSumm},
@@ -106,49 +90,6 @@ TEST_F(FP32NonQuantizedTest, SliceFollowedBy2FCsAnd2EltwisesOnCPU) {
.called_with_input_and_expected_output(input_data, expected_result);
}
TEST_F(FP32NonQuantizedTest, SplitAfterFCFollowedByFCAndEltwiseOnCPU) {
std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
std::vector<float> expected_result = {232.0, 232.0, 232.0, 232.0, 232.0,
232.0, 232.0, 232.0, 232.0, 232.0};
assert_that().onInferModel(FCBeforeSplitModel())
.inNotCompactMode().gna().propagate_forward().onCPU()
.called_with_input_and_expected_output(input_data, expected_result);
}
TEST_F(FP32NonQuantizedTest, ConcatPropagateForwardWithSuccessOnCPU) {
std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
std::vector<float> expected_result = {121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0};
assert_that().onInferModel(concatModel())
.inNotCompactMode().gna().propagate_forward().onCPU()
.called_with_input_and_expected_output(input_data, expected_result);
}
TEST_F(FP32NonQuantizedTest, DoubleConcatPropagateForwardWithSuccessOnCPU) {
std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
std::vector<float> expected_result = {141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0,
141.0, 141.0, 141.0, 141.0, 141.0};
assert_that().onInferModel(doubleConcatModel())
.inNotCompactMode().gna().propagate_forward().onCPU()
.called_with_input_and_expected_output(input_data, expected_result);
}
TEST_F(FP32NonQuantizedTest, multiple_inputs_correct_results) {
std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
std::vector<float> input2_data = {2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0};
@@ -301,19 +242,6 @@ TEST_F(FP32NonQuantizedTest, ScaleShiftWithBroadcastSupported) {
.called_with_input_and_expected_output(input_data, expected_result);
}
TEST_F(FP32NonQuantizedTest, ConcatWithConstInputPropagatedForward) {
std::vector<float> input_data = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
std::vector<float> expected_result = {121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0,
121.0, 121.0, 121.0, 121.0, 121.0};
assert_that().onInferModel(concatModelWithConstLayer())
.inNotCompactMode().gna().propagate_forward().onCPU()
.called_with_input(input_data).equals_to(expected_result);
}
TEST_F(FP32NonQuantizedTest, InputSplitConcatPropagateForward) {
std::vector<float> input_data(64, 1.0f);
std::vector<float> expected_result(10, 64.f);
@@ -696,4 +624,3 @@ TEST_F(FP32NonQuantizedTest, ReshapeConvolutionLessThan48Filters) {
.called_with_input(input_data)
.equals_to(expected_result);
}