Skip CPU tests on ARM platform (#16891)

* [CPU] ARM architecture support

This patch extends the existing CPU plugin capabilities with optimized support for ARM CPUs

* Fixed undefined reference in unit tests

* refactoring

* Fixed Eltwise node behavior for ARM

* init commit

* tests passed

* fix skip failures

* Apply suggestions from code review

---------

Co-authored-by: dmitrygo <dmitry.gorokhov@intel.com>
Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
This commit is contained in:
Aleksandr Voron
2023-04-13 00:34:36 +02:00
committed by GitHub
parent 86142b0f4b
commit 73be9d31b6
32 changed files with 100 additions and 1 deletions

View File

@@ -16,7 +16,13 @@ if (ENABLE_OV_ONNX_FRONTEND)
else()
set(EXCLUDED_SOURCE_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/extension ${CMAKE_CURRENT_SOURCE_DIR}/onnx)
endif()
# On ARM/AArch64 builds, exclude these test source directories from the build
# entirely (compile-time skip, as opposed to the runtime disabled-test patterns).
if(ARM OR AARCH64)
list(APPEND EXCLUDED_SOURCE_PATHS
${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests
${CMAKE_CURRENT_SOURCE_DIR}/subgraph_tests
${CMAKE_CURRENT_SOURCE_DIR}/bfloat16
)
endif()
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}

View File

@@ -248,6 +248,7 @@ TEST(OVClassBasicTest, smoke_SetConfigStreamsNum) {
}
TEST(OVClassBasicTest, smoke_SetConfigAffinity) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
ov::Core ie;
ov::Affinity value = ov::Affinity::NONE;

View File

@@ -201,6 +201,64 @@ std::vector<std::string> disabledTestPatterns() {
// retVector.emplace_back(R"(.*smoke_LPT.*ReduceMinTransformation.*f32.*)");
#endif
#if defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM)
retVector.emplace_back(R"(OVClassBasicTest.smoke_SetConfigAffinity.*)");
retVector.emplace_back(R"(OVClassConfigTestCPU.smoke_Check(Model|Core)StreamsHasHigherPriorityThanLatencyHint.*)");
retVector.emplace_back(R"(.*OVInferRequestDynamicTests.*)");
retVector.emplace_back(R"(ONNXQuantizedModels/QuantizedModelsTests.*)");
retVector.emplace_back(R"(smoke_serialization/ExecGraphSerializationTest.ExecutionGraph.*)");
retVector.emplace_back(R"(smoke_BehaviorTests/OVCompiledModelPropertiesDefaultTests.CheckDefaultValues.*)");
retVector.emplace_back(R"(smoke_BehaviorTests/CorrectConfigCheck.canSetConfigAndCheckGetConfig.*CPU_THROUGHPUT_STREAMS=8.*)");
retVector.emplace_back(R"(smoke_BehaviorTests/CorrectConfigCheck.canSetConfigTwiceAndCheckGetConfig.*CPU_THROUGHPUT_STREAMS=8.*)");
retVector.emplace_back(R"(smoke_Basic/DefaultConfigurationTest.checkDeviceDefaultConfigurationValue/configKey=CPU_BIND_THREAD.*)");
retVector.emplace_back(R"(smoke_ExecGraph/ExecGraphRuntimePrecision.CheckRuntimePrecision/Function=(EltwiseWithTwoDynamicInputs|FakeQuantizeRelu).*)");
retVector.emplace_back(R"(smoke_LPT.*)");
retVector.emplace_back(R"(smoke_CPU_OVClassLoadNetworkAndCheckWithSecondaryPropertiesTest/OVClassLoadNetworkAndCheckSecondaryPropertiesTest.LoadNetworkAndCheckSecondaryPropertiesTest.*)");
retVector.emplace_back(R"(smoke_CPU_OVClassLoadNetworkAndCheckWithSecondaryPropertiesDoubleTest/OVClassLoadNetworkAndCheckSecondaryPropertiesTest.LoadNetworkAndCheckSecondaryPropertiesTest.*)");
retVector.emplace_back(R"(smoke_Activation_Basic/ActivationLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_Integer_Activation_Basic/ActivationLayerTest.CompareWithRefs/(Tanh|Negative|Sqrt).*)");
retVector.emplace_back(R"(smoke_Activation_Basic_Prelu_Const/ActivationLayerTest.CompareWithRefs/(LeakyRelu|PReLu).*)");
retVector.emplace_back(R"(smoke_Activation_Basic_Prelu_Param/ActivationParamLayerTest.CompareWithRefs/(LeakyRelu|PReLu).*)");
retVector.emplace_back(R"(smoke_CompareWithRefs/ComparisonLayerTest.ComparisonTests.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_ExplicitPadding/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_AutoPadValid/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_DeformableGroups_ExplicitPadding/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_SingleTestCase/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_MultipleGroups/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_DeformableConvolution2D_MultipleGroups_2/DeformableConvolutionLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_CompareWithRefs_static/EltwiseLayerTest.EltwiseTests.*)");
retVector.emplace_back(R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.EltwiseTests.*)");
retVector.emplace_back(R"(smoke_SingleThread/EltwiseLayerTest.EltwiseTests.*)");
retVector.emplace_back(R"(smoke_GRUSequenceCommonZeroClip/GRUSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_GRUSequenceCommonClip/GRUSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_StaticShapeLoop/StaticShapeLoopTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_StaticShapeLoop/StaticShapeLoopTest.CompareWithPredefinedRefs.*)");
retVector.emplace_back(R"(smoke_TrivialLoop/TrivialLoopTest.AutoSlicingInputWithDynCondition_(CheckPredefinedValues|CheckReference).*)");
retVector.emplace_back(R"(smoke_LSTMSequenceCommonZeroClip/LSTMSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_LSTMSequenceCommonClip/LSTMSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_MVN_1D/Mvn6LayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_Decomposition_(3|4|10)D/Mvn6LayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_AvgPool_ExplicitPad_CeilRounding/PoolingLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_ReduceLogicalOneAxis/ReduceOpsLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_Reduce_InputShapes/ReduceOpsLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_ReduceLogical_ReductionTypes/ReduceOpsLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_RNNSequenceCommonClip/RNNSequenceTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_INTEL_CPU_TestsDFT_(1|2|3|4)d/DFTLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_INTEL_CPU_TestsSelect_none/SelectLayerTest.CompareWithRefImpl/COND=BOOL.*)");
retVector.emplace_back(R"(smoke_INTEL_CPU_TestsSelect_numpy/SelectLayerTest.CompareWithRefImpl/COND=BOOL.*)");
retVector.emplace_back(R"(smoke_SoftMax(2|4)D_dynamic/SoftMax8LayerTest.CompareWithRefs/NetType=f32_InType=undefined_OutType=undefined.*)");
retVector.emplace_back(R"(smoke_TopK/TopKLayerTest.CompareWithRefsDynamicBath.*)");
retVector.emplace_back(R"(smoke_Snippets.*)");
retVector.emplace_back(R"(smoke_Quantized.*)");
retVector.emplace_back(R"(smoke_NegativeQuantizedMatMulMultiplyFusion.*)");
retVector.emplace_back(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_MultipleAdd_Nd/MultiplyAddLayerTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_MVNMultiplyAdd_1D/MVNMultiplyAdd.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_If/SimpleIfTest.CompareWithRefs.*)");
retVector.emplace_back(R"(smoke_If/SimpleIfNotConstConditionTest.CompareWithRefs.*)");
#endif
#if defined(_WIN32) || defined(_WIN64)
retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)");
#endif

View File

@@ -9,10 +9,12 @@
namespace SubgraphTestsDefinitions {
// Parameterized subgraph test: skip early when the test matches a disabled-test
// pattern (e.g. the ARM skip list), otherwise run the comparison-with-reference flow.
TEST_P(MatMulConstTransposesExtractionTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
}
// Quantized variant of the same subgraph test; the skip guard must precede Run().
TEST_P(QuantizedMatMulConstTransposesExtractionTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
}

View File

@@ -9,10 +9,12 @@
namespace SubgraphTestsDefinitions {
// Parameterized subgraph test: bail out early if this test is disabled
// (e.g. via disabledTestPatterns on ARM), then run the reference comparison.
TEST_P(MatMulMultiplyFusion, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
}
// Quantized variant; same skip-then-run structure.
TEST_P(QuantizedMatMulMultiplyFusion, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
}
} // namespace SubgraphTestsDefinitions

View File

@@ -9,6 +9,7 @@
namespace SubgraphTestsDefinitions {
// Parameterized subgraph test: skip early when disabled (e.g. quantized tests
// on ARM), otherwise execute the comparison-with-reference flow.
TEST_P(QuantizedConvolutionBatchNorm, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
}

View File

@@ -19,6 +19,7 @@ std::string DefaultConfigurationTest::getTestCaseName(const ::testing::TestParam
}
TEST_P(DefaultConfigurationTest, checkDeviceDefaultConfigurationValue) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
target_device = std::get<DeviceName>(GetParam());
std::string key;
InferenceEngine::Parameter parameter;

View File

@@ -79,6 +79,7 @@ void ConvolutionBackpropDataTransformation::Run() {
}
// LPT test entry point: honor disabled-test patterns before running the
// transformation comparison against the reference implementation.
TEST_P(ConvolutionBackpropDataTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -64,6 +64,7 @@ void ConvolutionQDqTransformation::Run() {
}
// LPT test entry point: skip early when disabled (e.g. smoke_LPT.* on ARM),
// otherwise run the comparison-with-reference flow.
TEST_P(ConvolutionQDqTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -66,6 +66,7 @@ void ConvolutionTransformation::Run() {
}
// LPT test entry point: the skip guard must come before Run() so disabled
// patterns take effect without executing the test body.
TEST_P(ConvolutionTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -114,6 +114,7 @@ void ElementwiseBranchSelectionTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ElementwiseBranchSelectionTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -66,6 +66,7 @@ void FakeQuantizeTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(FakeQuantizeTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -108,6 +108,7 @@ void GroupConvolutionTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(GroupConvolutionTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -64,6 +64,7 @@ void GroupConvolutionQDqTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(GroupConvolutionQDqTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -86,6 +86,7 @@ void MatMulWithConstantTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(MatMulWithConstantTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -74,6 +74,7 @@ void MoveFakeQuantizeTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(MoveFakeQuantizeTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -66,6 +66,7 @@ void MultiplyToGroupConvolutionTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(MultiplyToGroupConvolutionTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -97,6 +97,7 @@ void MultiplyTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(MultiplyTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -58,6 +58,7 @@ void PadTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(PadTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -88,6 +88,7 @@ void PullReshapeThroughDequantizationTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(PullReshapeThroughDequantizationTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -61,6 +61,7 @@ void ReduceMaxTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ReduceMaxTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -69,6 +69,7 @@ void ReduceMeanTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ReduceMeanTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -61,6 +61,7 @@ void ReduceMinTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ReduceMinTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -61,6 +61,7 @@ void ReduceSumTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ReduceSumTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -59,6 +59,7 @@ void ReshapeTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ReshapeTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -56,6 +56,7 @@ void ShuffleChannelsTransformation::Run() {
}
// LPT test entry point: skip if disabled, then run the reference comparison.
TEST_P(ShuffleChannelsTransformation, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Run();
};

View File

@@ -47,19 +47,23 @@ void QuantizedModelsTests::runModel(const char* model, const LayerInputTypes& ex
}
// ONNX quantized-model tests: each case loads a fixture model, runs inference,
// and checks the runtime precision of the named layers against the expected
// element types, within the given accuracy threshold. Each test skips early if
// it matches a disabled-test pattern (e.g. ONNXQuantizedModels/* on ARM).
TEST_P(QuantizedModelsTests, MaxPoolQDQ) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
runModel("max_pool_qdq.onnx", {{"890_original", {ngraph::element::u8}}}, 1e-5);
}
TEST_P(QuantizedModelsTests, MaxPoolFQ) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
runModel("max_pool_fq.onnx", {{"887_original", {ngraph::element::u8}}}, 1e-5);
}
TEST_P(QuantizedModelsTests, ConvolutionQDQ) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
// activations have type uint8 and weights int8
runModel("convolution_qdq.onnx", {{"908_original", {ngraph::element::u8, ngraph::element::i8}}}, 1.5e-2);
}
TEST_P(QuantizedModelsTests, ConvolutionFQ) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
// activations have type uint8 and weights int8
runModel("convolution_fq.onnx", {{"902_original", {ngraph::element::u8, ngraph::element::i8}}}, 1.5e-2);
}

View File

@@ -66,6 +66,7 @@ void FakeQuantizeDecompositionTest::SetUp() {
}
TEST_P(FakeQuantizeDecompositionTest, CompareWithRefImpl) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
run();
const auto operation = std::get<1>(this->GetParam());

View File

@@ -37,6 +37,7 @@ void SnippetsTestsCommon::validateNumSubgraphs() {
}
void SnippetsTestsCommon::validateOriginalLayersNamesByType(const std::string& layerType, const std::string& originalLayersNames) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
const auto& compiled_model = compiledModel.get_runtime_model();
for (const auto& op : compiled_model->get_ops()) {
const auto& rtInfo = op->get_rt_info();

View File

@@ -101,6 +101,7 @@ void QuantizedMatMulConstTransposesExtractionTest::SetUp() {
}
void QuantizedMatMulConstTransposesExtractionTest::TearDown() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction();
int ops_found = 0;
for (const auto& node : runtime_function->get_ordered_ops()) {

View File

@@ -115,6 +115,7 @@ void QuantizedMatMulMultiplyFusion::SetUp() {
}
void QuantizedMatMulMultiplyFusion::TearDown() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
auto get_layer_type = [] (const std::shared_ptr<ngraph::Node>& node) -> const std::string& {
const auto& rt_info = node->get_rt_info();
auto it = rt_info.find(ExecGraphInfoSerialization::LAYER_TYPE);

View File

@@ -167,6 +167,7 @@ void QuantizedConvolutionBatchNorm::SetUp() {
}
void QuantizedConvolutionBatchNorm::TearDown() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
auto get_layer_type = [] (const std::shared_ptr<ngraph::Node>& node) -> const std::string& {
const auto& rt_info = node->get_rt_info();
auto it = rt_info.find(ExecGraphInfoSerialization::LAYER_TYPE);