[CPU] [ARM] Enable Pooling SLT tests on ARM (#18013)

This commit is contained in:
Aleksandr Voron
2023-10-17 16:09:44 +02:00
committed by GitHub
parent 404f4e29b7
commit e87d147f4c
8 changed files with 924 additions and 741 deletions

View File

@@ -20,7 +20,8 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo,
DataLayout dataLayout,
const VectorDims* indDims,
PoolingLayerInfo* pool_info,
Pooling3dLayerInfo* pool3d_info) {
Pooling3dLayerInfo* pool3d_info,
bool ignoreOutShapeErrors) {
unsigned int pad_left = (poolingAttrs.data_pad_begin.size() >= 2u) ? poolingAttrs.data_pad_begin[1] : poolingAttrs.data_pad_begin[0];
unsigned int pad_right = (poolingAttrs.data_pad_end.size() >= 2u) ? poolingAttrs.data_pad_end[1] : poolingAttrs.data_pad_end[0];
unsigned int pad_top = (poolingAttrs.data_pad_begin.size() >= 2u) ? poolingAttrs.data_pad_begin[0] : 0;
@@ -46,7 +47,12 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo,
// The combination of parameters: NCHW + CEIL gives an accuracy problem in AvgPool.
// One workaround is to disable the ACL executor for these parameters.
// Then OneDNN will run this case in ACL backend as reorder -> NHWC -> reorder
if (dataLayout == arm_compute::DataLayout::NCHW && poolingAttrs.rounding == op::RoundingType::CEIL) return false;
if (pool_type == PoolingType::AVG &&
dataLayout == arm_compute::DataLayout::NCHW &&
poolingAttrs.rounding == op::RoundingType::CEIL) {
DEBUG_LOG("NCHW + CEIL gives an accuracy problem in ACL AvgPool. ACL executor will not be created.");
return false;
}
DimensionRoundingType round = (poolingAttrs.rounding == op::RoundingType::CEIL) ?
DimensionRoundingType::CEIL : DimensionRoundingType::FLOOR;
@@ -82,12 +88,22 @@ bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo,
arm_compute::Status s = arm_compute::NEPoolingLayer::validate(&srcTensorInfo, &dstTensorInfo, *pool_info, &indTensorInfo);
if (!s) {
DEBUG_LOG("NEPoolingLayer validation with indices failed: ", s.error_description());
if (ignoreOutShapeErrors &&
s.error_description().find("Tensors have different shapes") != std::string::npos) {
DEBUG_LOG("Ignore shape error because the flag ignoreOutShapeErrors is set");
return true;
}
return false;
}
} else {
arm_compute::Status s = arm_compute::NEPoolingLayer::validate(&srcTensorInfo, &dstTensorInfo, *pool_info);
if (!s) {
DEBUG_LOG("NEPoolingLayer validation without indices failed: ", s.error_description());
if (ignoreOutShapeErrors &&
s.error_description().find("Tensors have different shapes") != std::string::npos) {
DEBUG_LOG("Ignore shape error because the flag ignoreOutShapeErrors is set");
return true;
}
return false;
}
}

View File

@@ -31,7 +31,8 @@ public:
arm_compute::DataLayout dataLayout,
const VectorDims* indDims,
arm_compute::PoolingLayerInfo* pool_info,
arm_compute::Pooling3dLayerInfo* pool3d_info);
arm_compute::Pooling3dLayerInfo* pool3d_info,
bool ignoreOutShapeErrors = false);
impl_desc_type getImplType() const override {
return implType;
@@ -79,8 +80,8 @@ public:
if (dstDescs.size() == 2u &&
dstDescs[1]->getPrecision() != InferenceEngine::Precision::U32) {
DEBUG_LOG("AclPoolingExecutor does not support precisions:",
" dst[1]=", dstDescs[1]->getPrecision());
DEBUG_LOG("AclPoolingExecutor supports U32 as indices precisions only. ",
"Passed indices precision: ", dstDescs[1]->getPrecision());
return false;
}

View File

@@ -271,14 +271,31 @@ void Pooling::getSupportedDescriptors() {
const auto &childShape = getOutputShapeAtPort(0);
const size_t inputRank = getInputShapeAtPort(0).getRank();
if (isDynamicNode()) {
inShape = MemoryDescUtils::makeDummyShape(parentShape);
const auto& origDims = parentShape.getDims();
const auto& origMaxDims = parentShape.getMaxDims();
auto inDims = inShape.getStaticDims();
for (size_t i = 0; i < inDims.size() - 2; i++) {
if (origDims[i + 2] == Shape::UNDEFINED_DIM) {
inDims[i + 2] = std::min<Dim>(origMaxDims[i + 2], std::max<Dim>(inDims[i + 2], poolingAttrs.kernel[i]));
}
}
inShape = Shape(inDims);
} else {
inShape = parentShape;
}
#if defined(OV_CPU_WITH_ACL)
// WA: we may specify any layout here (NCHW or NHWC) since both are supported by ACL
arm_compute::DataLayout dataLayout = (parentShape.getDims().size() == 5) ? arm_compute::DataLayout::NDHWC : arm_compute::DataLayout::NCHW;
arm_compute::TensorInfo srcTensorInfo = arm_compute::TensorInfo(shapeCast(parentShape.getDims()),
arm_compute::DataLayout dataLayout = (inShape.getDims().size() == 5) ? arm_compute::DataLayout::NDHWC : arm_compute::DataLayout::NCHW;
arm_compute::TensorInfo srcTensorInfo = arm_compute::TensorInfo(shapeCast(inShape.getDims()),
1,
precisionToAclDataType(inputPrecision),
dataLayout);
arm_compute::TensorInfo dstTensorInfo = arm_compute::TensorInfo(shapeCast(childShape.getDims()),
arm_compute::TensorInfo dstTensorInfo = arm_compute::TensorInfo(shapeCast(isDynamicNode() ? MemoryDescUtils::makeDummyShape(childShape).getDims() :
childShape.getDims()),
1,
precisionToAclDataType(outputPrecision),
dataLayout);
@@ -287,16 +304,19 @@ void Pooling::getSupportedDescriptors() {
useACL = AclPoolingExecutor::isSupported(srcTensorInfo,
dstTensorInfo,
poolingAttrs,
parentShape.getDims().size(),
inShape.getDims().size(),
getOriginalOutputsNumber(),
dataLayout,
(getOriginalOutputsNumber() > 1) ? &getOutputShapeAtPort(1).getDims() : nullptr,
&pool_info,
&pool3d_info);
&pool3d_info,
isDynamicNode());
//FIXME: 5D tensors case is not assigned to ACL because there is no way to check layout here
//NEPooling3dLayer supports NDHWC only
if (parentShape.getDims().size() == 5)
if (inShape.getDims().size() == 5) {
useACL = false;
DEBUG_LOG("FIXME: 5D tensors case is not assigned to ACL because there is no way to check layout in getSupportedDescriptors()");
}
#endif
if (useACL) return;
@@ -324,19 +344,7 @@ void Pooling::getSupportedDescriptors() {
if ((inputRank < 3) || (inputRank > 5))
IE_THROW() << "Pooling layer. Unsupported mode. Only 3D, 4D and 5D blobs are supported as input.";
inShape = MemoryDescUtils::makeDummyShape(parentShape);
if (isDynamicNode()) {
const auto& origDims = parentShape.getDims();
const auto& origMaxDims = parentShape.getMaxDims();
auto inDims = inShape.getStaticDims();
for (size_t i = 0; i < inDims.size() - 2; i++) {
if (origDims[i + 2] == Shape::UNDEFINED_DIM) {
inDims[i + 2] = std::min<Dim>(origMaxDims[i + 2], std::max<Dim>(inDims[i + 2], poolingAttrs.kernel[i]));
}
}
inShape = Shape(inDims);
}
initEffectiveAttributes(inShape,
MemoryDescUtils::makeDummyShape(childShape));
@@ -386,7 +394,12 @@ void Pooling::prepareParams() {
} else {
attr = initPrimitiveAttr();
}
if (isDynamicNode()) {
if (poolingAttrs.auto_pad) {
poolingAttrs.data_pad_begin = shapeInference->get_pads_begin();
poolingAttrs.data_pad_end = shapeInference->get_pads_end();
}
}
if (useACL) {
auto dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
auto srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
@@ -414,10 +427,6 @@ void Pooling::prepareParams() {
auto outDesc = getChildEdgesAtPort(0)[0]->getMemory().getDescWithType<DnnlMemoryDesc>();
if (isDynamicNode()) {
if (poolingAttrs.auto_pad) {
poolingAttrs.data_pad_begin = shapeInference->get_pads_begin();
poolingAttrs.data_pad_end = shapeInference->get_pads_end();
}
initEffectiveAttributes(inDesc->getShape(), outDesc->getShape());
}
@@ -593,18 +602,17 @@ void Pooling::initSupportedPrimitiveDescriptors() {
config.inConfs.resize(getParentEdges().size());
config.outConfs.resize(getOriginalOutputsNumber());
config.inConfs[0].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(0), getInputShapeAtPort(0)));
config.outConfs[0].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(0), getOutputShapeAtPort(0)));
std::vector<MemoryDescPtr> srcMemoryDescs;
for (const auto& inConf : config.inConfs) {
srcMemoryDescs.push_back(inConf.getMemDesc());
for (size_t i = 0; i < config.inConfs.size(); i++) {
config.inConfs[i].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalInputPrecisionAtPort(i), getInputShapeAtPort(i)));
srcMemoryDescs.push_back(config.inConfs[i].getMemDesc());
}
std::vector<MemoryDescPtr> dstMemoryDescs;
for (const auto& outConf : config.outConfs) {
dstMemoryDescs.push_back(outConf.getMemDesc());
for (size_t i = 0; i < config.outConfs.size(); i++) {
config.outConfs[i].setMemDesc(
creatorsMap.at(format)->createSharedDesc(getOriginalOutputPrecisionAtPort(i), getOutputShapeAtPort(i)));
dstMemoryDescs.push_back(config.outConfs[i].getMemDesc());
}
auto factory = std::make_shared<PoolingExecutorFactory>(

View File

@@ -0,0 +1,464 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "pooling.hpp"
#include "test_utils/cpu_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ngraph::helpers;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
// Builds a unique, human-readable test name from the parameter tuple so that
// individual cases can be selected with --gtest_filter. The name encodes input
// shapes, precision, pooling attributes, int8 flag, CPU config, and fusing config.
std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo<poolLayerCpuTestParamsSet>& obj) {
    LayerTestsDefinitions::poolSpecificParams basicParamsSet;
    InputShape inputShapes;
    ElementType inPrc;
    bool isInt8;
    CPUSpecificParams cpuParams;
    fusingSpecificParams fusingParams;
    std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = obj.param;

    // Unpack the pooling-specific sub-tuple (order is fixed by poolSpecificParams).
    ngraph::helpers::PoolingTypes poolType;
    std::vector<size_t> kernel, stride;
    std::vector<size_t> padBegin, padEnd;
    ngraph::op::PadType padType;
    ngraph::op::RoundingType roundingType;
    bool excludePad;
    std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;

    std::ostringstream results;
    results << "IS=(";
    results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_";
    results << "TS=";
    for (const auto& shape : inputShapes.second) {
        results << ov::test::utils::vec2str(shape) << "_";
    }
    results << "Prc=" << inPrc << "_";
    switch (poolType) {
        case ngraph::helpers::PoolingTypes::MAX:
            results << "MaxPool_";
            break;
        case ngraph::helpers::PoolingTypes::AVG:
            results << "AvgPool_";
            // excludePad is only meaningful for average pooling, so it is reported here only.
            results << "ExcludePad=" << excludePad << "_";
            break;
    }
    results << "K" << ov::test::utils::vec2str(kernel) << "_";
    results << "S" << ov::test::utils::vec2str(stride) << "_";
    results << "PB" << ov::test::utils::vec2str(padBegin) << "_";
    results << "PE" << ov::test::utils::vec2str(padEnd) << "_";
    results << "Rounding=" << roundingType << "_";
    results << "AutoPad=" << padType << "_";
    results << "INT8=" << isInt8 << "_";
    results << CPUTestsBase::getTestCaseName(cpuParams);
    results << CpuTestWithFusing::getTestCaseName(fusingParams);
    return results.str();
}
// Prepares one test case: unpacks the parameter tuple, derives the expected CPU
// primitive type string, and builds a model Parameter -> (optional FakeQuantize)
// -> Pooling with the requested fusing post-ops.
void PoolingLayerCPUTest::SetUp() {
    targetDevice = ov::test::utils::DEVICE_CPU;

    LayerTestsDefinitions::poolSpecificParams basicParamsSet;
    InputShape inputShapes;
    ElementType inPrc;
    bool isInt8;
    CPUSpecificParams cpuParams;
    fusingSpecificParams fusingParams;
    std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = this->GetParam();

    ngraph::helpers::PoolingTypes poolType;
    std::vector<size_t> kernel, stride;
    std::vector<size_t> padBegin, padEnd;
    ngraph::op::PadType padType;
    ngraph::op::RoundingType roundingType;
    bool excludePad;
    std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;

    std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
    std::tie(postOpMgrPtr, fusedOps) = fusingParams;

    // If the CPU config did not force a primitive type, expect the platform default.
    if (selectedType.empty()) {
        selectedType = getPrimitiveType();
    }
    if (isInt8)
        selectedType = selectedType + "_I8";
    else
        selectedType = makeSelectedTypeStr(selectedType, inPrc);

    init_input_shapes({inputShapes});

    ov::ParameterVector params;
    for (auto&& shape : inputDynamicShapes) {
        params.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
    }

    std::shared_ptr<ngraph::Node> poolInput = params[0];
    // For the int8 flavour, insert a per-tensor (all-ones shape) FakeQuantize
    // in front of the pooling so the quantized path is exercised.
    if (isInt8) {
        ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1);
        poolInput = ngraph::builder::makeFakeQuantize(poolInput, inPrc, 256, newShape);
    }

    std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makePooling(poolInput,
                                                                         stride,
                                                                         padBegin,
                                                                         padEnd,
                                                                         kernel,
                                                                         roundingType,
                                                                         padType,
                                                                         excludePad,
                                                                         poolType);

    function = makeNgraphFunction(inPrc, params, pooling, "PoolingCPU");
}
// Builds a unique test name for MaxPool-v8 cases (adds dilation "D..." to the
// common fields; no int8/fusing dimensions for this test class).
std::string MaxPoolingV8LayerCPUTest::getTestCaseName(
    const testing::TestParamInfo<maxPoolV8LayerCpuTestParamsSet>& obj) {
    LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
    InputShape inputShapes;
    ElementType inPrc;
    CPUSpecificParams cpuParams;
    std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param;

    // Unpack the MaxPool-v8 sub-tuple (order fixed by maxPoolV8SpecificParams).
    std::vector<size_t> kernel, stride, dilation;
    std::vector<size_t> padBegin, padEnd;
    ngraph::op::PadType padType;
    ngraph::op::RoundingType roundingType;
    ngraph::element::Type indexElementType;
    int64_t axis;
    std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) =
        basicParamsSet;

    std::ostringstream results;
    results << "IS=(";
    results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_";
    results << "TS=";
    for (const auto& shape : inputShapes.second) {
        results << ov::test::utils::vec2str(shape) << "_";
    }
    results << "Prc=" << inPrc << "_";
    results << "MaxPool_";
    results << "K" << ov::test::utils::vec2str(kernel) << "_";
    results << "S" << ov::test::utils::vec2str(stride) << "_";
    results << "D" << ov::test::utils::vec2str(dilation) << "_";
    results << "PB" << ov::test::utils::vec2str(padBegin) << "_";
    results << "PE" << ov::test::utils::vec2str(padEnd) << "_";
    results << "Rounding=" << roundingType << "_";
    results << "AutoPad=" << padType << "_";
    results << CPUTestsBase::getTestCaseName(cpuParams);
    return results.str();
}
// Prepares one MaxPool-v8 test case: unpacks parameters and builds a model
// Parameter -> MaxPool-v8, keeping only the values output as a Result.
void MaxPoolingV8LayerCPUTest::SetUp() {
    targetDevice = ov::test::utils::DEVICE_CPU;

    LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
    InputShape inputShapes;
    ElementType inPrc;
    CPUSpecificParams cpuParams;
    std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam();

    std::vector<size_t> kernel, stride, dilation;
    std::vector<size_t> padBegin, padEnd;
    ngraph::op::PadType padType;
    ngraph::op::RoundingType roundingType;
    ngraph::element::Type indexElementType;
    int64_t axis;
    std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) =
        basicParamsSet;

    std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
    // If the CPU config did not force a primitive type, expect the platform default.
    if (selectedType.empty()) {
        selectedType = getPrimitiveType();
    }
    selectedType = makeSelectedTypeStr(selectedType, inPrc);

    init_input_shapes({inputShapes});

    ov::ParameterVector params;
    for (auto&& shape : inputDynamicShapes) {
        params.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
    }

    std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makeMaxPoolingV8(params[0],
                                                                              stride,
                                                                              dilation,
                                                                              padBegin,
                                                                              padEnd,
                                                                              kernel,
                                                                              roundingType,
                                                                              padType,
                                                                              indexElementType,
                                                                              axis);
    pooling->get_rt_info() = getCPUInfo();

    // Only output(0) (values) becomes a Result; the indices output (port 1)
    // is intentionally left disconnected in this test.
    ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling->output(0))};
    function = std::make_shared<ngraph::Function>(results, params, "MaxPooling");
}
// Runs the pooling model against references and verifies the expected CPU
// primitive ("Pooling" node type) was actually selected.
TEST_P(PoolingLayerCPUTest, CompareWithRefs) {
    run();
    CheckPluginRelatedResults(compiledModel, "Pooling");
}
// Runs the MaxPool-v8 model against references and verifies the expected CPU
// primitive ("Pooling" node type) was actually selected.
TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) {
    run();
    CheckPluginRelatedResults(compiledModel, "Pooling");
}
namespace Pooling {
// The combination NCHW + CEIL gives an accuracy problem in ACL AvgPool, so on
// ARM the AvgPool cases are generated with FLOOR rounding; other platforms keep CEIL.
const ngraph::op::RoundingType expectedAvgRoundingType() {
#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)
    return ngraph::op::RoundingType::FLOOR;
#else
    return ngraph::op::RoundingType::CEIL;
#endif
}
// MaxPool kernel/stride/pad configurations for 3D (rank-3) inputs.
// Fields: pool type, kernel, stride, padBegin, padEnd, rounding, autoPad, excludePad.
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax3D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> maxPool3dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
    };
    return maxPool3dParams;
}
// AvgPool configurations for 3D inputs. Rounding comes from
// expectedAvgRoundingType() (FLOOR on ARM to avoid the ACL NCHW+CEIL issue).
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg3D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> avgPool3dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
    };
    return avgPool3dParams;
}
// Input/output precisions exercised by the pooling SLTs; bf16 is intentionally disabled.
const std::vector<ElementType>& inpOutPrecision() {
    static const std::vector<ElementType> precisions = {ElementType::f32/*, ElementType::bf16*/};
    return precisions;
}
// MaxPool configurations for 4D (rank-4) inputs covering SAME_LOWER/SAME_UPPER/EXPLICIT padding.
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax4D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> maxPool4dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
    };
    return maxPool4dParams;
}
// MaxPool-v8 configurations for 4D inputs (with dilation, i32 indices, axis 0).
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV84D() {
    static const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> maxPoolV84dParams = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
    };
    return maxPoolV84dParams;
}
const std::vector<InputShape>& inputShapes3D() {
static const std::vector<InputShape> inputShapes3D = {
{ {}, {{3, 4, 64}} },
{ {}, {{2, 8, 12}} },
{ {}, {{1, 16, 12}} },
{ {}, {{1, 21, 4}} },
{ {}, {{1, 32, 8}} },
{
// dynamic
{-1, -1, -1},
// target
{
{1, 32, 8},
{1, 21, 4},
{2, 8, 12}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}},
// target
{
{3, 4, 64},
{1, 16, 12},
{1, 32, 8}
}
}
};
return inputShapes3D;
}
const std::vector<InputShape>& inputShapes4D() {
static const std::vector<InputShape> inputShapes4D = {
{ {}, {{3, 4, 64, 64}} },
{ {}, {{2, 8, 8, 12}} },
{ {}, {{1, 16, 16, 12}} },
{ {}, {{1, 21, 8, 4}} },
{ {}, {{1, 32, 8, 8}} },
{
// dynamic
{-1, -1, -1, -1},
// target
{
{1, 32, 8, 8},
{1, 21, 8, 4},
{2, 8, 8, 12},
{1, 96, 125, 125}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}, {1, 64}},
// target
{
{3, 4, 64, 64},
{1, 16, 16, 12},
{1, 32, 8, 8}
}
},
{
// dynamic
{{1, 10}, 16, 8, 8},
// target
{
{1, 16, 8, 8},
{2, 16, 8, 8},
}
}
};
return inputShapes4D;
}
const std::vector<InputShape>& inputShapes5D() {
static const std::vector<InputShape> inputShapes5D = {
{ {}, {{1, 4, 16, 16, 16}} },
{ {}, {{2, 8, 8, 8, 8}} },
{ {}, {{2, 16, 12, 16, 20}} },
{ {}, {{1, 19, 16, 20, 8}} },
{ {}, {{1, 32, 16, 8, 12}} },
{
// dynamic
{-1, -1, -1, -1, -1},
// target
{
{2, 8, 8, 8, 8},
{1, 19, 16, 20, 8},
{1, 4, 16, 16, 16}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}, {1, 64}, {1, 25}},
// target
{
{1, 4, 16, 16, 16},
{1, 32, 16, 8, 12},
{3, 16, 4, 8, 3}
}
}
};
return inputShapes5D;
}
// MaxPool-v8 configurations for 5D inputs (i32 indices, axis 0).
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV85D() {
    static const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> maxPoolV85dParams = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
    };
    return maxPoolV85dParams;
}
// AvgPool configurations for 4D inputs; rounding comes from expectedAvgRoundingType().
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> avgPool4dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
    };
    return avgPool4dParams;
}
// AvgPool configurations for 5D inputs; rounding comes from expectedAvgRoundingType().
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg5D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> avgPool5dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true },
    };
    return avgPool5dParams;
}
// MaxPool configurations for 5D inputs.
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax5D() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> maxPool5dParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                                                   ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
    };
    return maxPool5dParams;
}
// Large-kernel AvgPool case (65x65) used to exercise the large-spatial path.
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D_Large() {
    static const std::vector<LayerTestsDefinitions::poolSpecificParams> avgPool4dLargeParams = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0},
                                                   ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true },
    };
    return avgPool4dLargeParams;
}
// Dynamic 4D shapes with large spatial dimensions, paired with paramsAvg4D_Large().
const std::vector<InputShape>& inputShapes4D_Large() {
    static const std::vector<InputShape> largeShapes4D = {
        {
            // fully dynamic rank-4 shape
            {-1, -1, -1, -1},
            // concrete shapes inferred per inference
            {
                {1, 16, 65, 65},
                {1, 8, 130, 130},
                {1, 16, 65, 65}
            }
        },
    };
    return largeShapes4D;
}
} // namespace Pooling
} // namespace CPULayerTestsDefinitions

View File

@@ -0,0 +1,69 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ov_models/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"
#include "shared_test_classes/single_layer/pooling.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace ov::test;
using namespace CPUTestUtils;
namespace CPULayerTestsDefinitions {
// Parameter tuple for the common Max/Avg pooling CPU single-layer test:
// pooling attributes, input shape, input precision, int8 flag, CPU config, fusing config.
using poolLayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecificParams,
                                             InputShape,
                                             ElementType,
                                             bool,
                                             CPUSpecificParams,
                                             fusingSpecificParams>;

// Parameter tuple for the MaxPool-v8 (dilation + indices output) CPU single-layer test.
using maxPoolV8LayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::maxPoolV8SpecificParams,
                                                  InputShape,
                                                  ElementType,
                                                  CPUSpecificParams>;
// Max/Avg pooling single-layer CPU test with optional int8 path and post-op fusing.
class PoolingLayerCPUTest : public testing::WithParamInterface<poolLayerCpuTestParamsSet>,
                            virtual public SubgraphBaseTest, public CpuTestWithFusing {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<poolLayerCpuTestParamsSet>& obj);

protected:
    void SetUp() override;
};
// MaxPool-v8 single-layer CPU test (values output only; no fusing/int8 dimensions).
class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface<maxPoolV8LayerCpuTestParamsSet>,
                                 virtual public SubgraphBaseTest, public CPUTestsBase {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerCpuTestParamsSet>& obj);

protected:
    void SetUp() override;
};
// Shared parameter/shape tables, defined in the classes .cpp, used by the
// per-architecture test instantiation files.
namespace Pooling {
const std::vector<ElementType>& inpOutPrecision();
// Rounding type expected for AvgPool cases (FLOOR on ARM due to an ACL accuracy issue).
const ngraph::op::RoundingType expectedAvgRoundingType();

const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax3D();
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg3D();
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax4D();
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV84D();
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV85D();

const std::vector<InputShape>& inputShapes3D();
const std::vector<InputShape>& inputShapes4D();
const std::vector<InputShape>& inputShapes4D_Large();
const std::vector<InputShape>& inputShapes5D();

const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D();
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D_Large();
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg5D();
const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax5D();
} // namespace Pooling
} // namespace CPULayerTestsDefinitions

View File

@@ -0,0 +1,181 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/classes/pooling.hpp"
#include "shared_test_classes/single_layer/pooling.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ngraph::helpers;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
namespace Pooling {
// CPU config expected to execute these cases: the ACL executor on ARM,
// the reference implementation elsewhere.
static CPUSpecificParams expectedCpuConfig() {
#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)
    return CPUSpecificParams{{}, {}, {"acl"}, "acl"};
#else
    return CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};
#endif
}
const std::vector<CPUSpecificParams> vecCpuConfigs = {expectedCpuConfig()};

// AvgPool 3D case that is expected to run only with expectedCpuConfig()
// (i.e. not picked up by the optimized path — see the *_NotOptimized suites).
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2}, {2}, {2}, {2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
};
// 3D suites: Max/Avg pooling over inputShapes3D(), non-int8, no fusing.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_3D, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsMax3D()),
                             ::testing::ValuesIn(inputShapes3D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::ValuesIn(vecCpuConfigs),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsAvg3D()),
                             ::testing::ValuesIn(inputShapes3D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::ValuesIn(vecCpuConfigs),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

// Ref-only AvgPool cases pinned to expectedCpuConfig().
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsAvg3D_RefOnly),
                             ::testing::ValuesIn(inputShapes3D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::Values(expectedCpuConfig()),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
// AvgPool 4D case expected to run only with expectedCpuConfig().
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
};

// 4D suites: Max/MaxV8/Avg pooling over inputShapes4D(), non-int8, no fusing.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsMax4D()),
                             ::testing::ValuesIn(inputShapes4D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::ValuesIn(vecCpuConfigs),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsMaxV84D()),
                             ::testing::ValuesIn(inputShapes4D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::ValuesIn(vecCpuConfigs)),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsAvg4D()),
                             ::testing::ValuesIn(inputShapes4D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::ValuesIn(vecCpuConfigs),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsAvg4D_RefOnly),
                             ::testing::ValuesIn(inputShapes4D()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::Values(expectedCpuConfig()),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

// Large-kernel AvgPool suite (65x65 kernel over large spatial dims).
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_Large, PoolingLayerCPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(paramsAvg4D_Large()),
                             ::testing::ValuesIn(inputShapes4D_Large()),
                             ::testing::ValuesIn((inpOutPrecision())),
                             ::testing::Values(false),
                             ::testing::ValuesIn(vecCpuConfigs),
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
// MaxPool-v8 5D cases with dilation > 1, expected to run only with expectedCpuConfig().
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D_ref = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
};

// AvgPool 5D case expected to run only with expectedCpuConfig().
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                                                   expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
};
//FIXME: 5D cases are temporarly disabled on ARM because ACL support check in Pooling::getSupportedDescriptors() can't check layout
// x86/x86_64 only: all 5D (3D-spatial) pooling suites below are compiled out on other
// architectures until the ARM/ACL layout check above is resolved.
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMax5D()),
                                 ::testing::ValuesIn(inputShapes5D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(vecCpuConfigs),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV85D()),
                                 ::testing::ValuesIn(inputShapes5D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::ValuesIn(vecCpuConfigs)),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);
// Dilated MaxPool-8 cases pinned to the expected (reference) config.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV85D_ref),
                                 ::testing::ValuesIn(inputShapes5D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::Values(expectedCpuConfig())),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D()),
                                 ::testing::ValuesIn(inputShapes5D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(vecCpuConfigs),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D_RefOnly),
                                 ::testing::ValuesIn(inputShapes5D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::Values(false),
                                 ::testing::Values(expectedCpuConfig()),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
#endif
} // namespace Pooling
} // namespace CPULayerTestsDefinitions

View File

@@ -0,0 +1,148 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/classes/pooling.hpp"
#include "shared_test_classes/single_layer/pooling.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"
#include <ov_models/builders.hpp>
#include <common_test_utils/ov_tensor_utils.hpp>
using namespace InferenceEngine;
using namespace CPUTestUtils;
using namespace ngraph::helpers;
using namespace ov::test;
namespace CPULayerTestsDefinitions {
namespace Pooling {
namespace {
// CPU-specific configs: {input formats, output formats, primitive-impl filters,
// expected selected primitive type}. `ref` forces the reference implementation;
// the jit_* configs select the matching x86 SIMD JIT primitives.
const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};
const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"};
const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"};
const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"};
const std::vector<CPUSpecificParams> vecCpuConfigs = {sse42, avx, avx512};
// MaxPool-8 (4D) cases with dilation != 1, expected to run on the reference path:
// {kernel, strides, dilations, pads_begin, pads_end, index type, axis, rounding, auto-pad}.
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D_ref = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
};
// Dilated MaxPool-8 cases pinned to the `ref` config defined above.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV84D_ref),
                                 ::testing::ValuesIn(inputShapes4D()),
                                 ::testing::ValuesIn((inpOutPrecision())),
                                 ::testing::Values(ref)),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);
// Channels-last configs used for the int8/fusing suites: nhwc for 4D, ndhwc for 5D.
const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"};
const auto avx512_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx512"}, "jit_avx512"};
const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"};
const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"};
const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"};
const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc};
// Post-op fusing variants exercised by the *_I8 suites (no fusing + two FQ flavors).
std::vector<fusingSpecificParams> fusingParamsSet {
        emptyFusingSpec,
        fusingFakeQuantizePerTensor,
        fusingFakeQuantizePerChannel,
};
// 4D shapes for the int8 path: static cases, plus a dynamic case with fixed
// channel count (32) and one with bounded ranges; repeated target shapes
// exercise the shape-change/reuse path of the primitive.
const std::vector<InputShape> inputShapes4D_int8 = {
        { {}, {{3, 4, 64, 64}} },
        { {}, {{2, 8, 8, 12}} },
        { {}, {{1, 16, 16, 12}} },
        { {}, {{1, 21, 8, 4}} },
        { {}, {{1, 32, 8, 8}} },
        {
            // dynamic
            {-1, 32, -1, -1},
            // target
            {
                {1, 32, 8, 8},
                {1, 32, 8, 4},
                {2, 32, 8, 12},
                {1, 32, 8, 8}
            }
        },
        {
            // dynamic
            {{1, 5}, 16, {1, 64}, {1, 64}},
            // target
            {
                {3, 16, 32, 32},
                {1, 16, 16, 12},
                {1, 16, 8, 8},
                {3, 16, 32, 32},
            }
        }
};
// int8 AvgPool (isInt8 = true inserts a FakeQuantize before pooling in the fixture),
// channels-last configs filtered by what the current device supports.
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_I8, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg4D()),
                                 ::testing::ValuesIn(inputShapes4D_int8),
                                 ::testing::Values(ElementType::f32),
                                 ::testing::Values(true),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_4D)),
                                 ::testing::ValuesIn(fusingParamsSet)),
                         PoolingLayerCPUTest::getTestCaseName);
// 5D counterpart of inputShapes4D_int8: static shapes plus dynamic cases with a
// fixed channel dim and with bounded ranges; repeated targets re-check reshape handling.
const std::vector<InputShape> inputShapes5D_int8 = {
        { {}, {{1, 4, 16, 16, 16}} },
        { {}, {{2, 8, 8, 8, 8}} },
        { {}, {{2, 16, 12, 16, 20}} },
        { {}, {{1, 19, 16, 20, 8}} },
        { {}, {{1, 32, 16, 8, 12}} },
        {
            // dynamic
            {-1, 32, -1, -1, -1},
            // target
            {
                {2, 32, 8, 8, 8},
                {1, 32, 16, 20, 8},
                {1, 32, 16, 16, 16},
                {2, 32, 8, 8, 8}
            }
        },
        {
            // dynamic
            {{1, 5}, 16, {1, 64}, {1, 64}, {1, 25}},
            // target
            {
                {1, 16, 16, 16, 16},
                {1, 16, 16, 8, 12},
                {2, 16, 8, 8, 8},
                {1, 16, 16, 16, 16},
            }
        }
};
// int8 AvgPool over 5D shapes with ndhwc configs and the shared fusing set.
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_I8, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D()),
                                 ::testing::ValuesIn(inputShapes5D_int8),
                                 ::testing::Values(ElementType::f32),
                                 ::testing::Values(true),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_5D)),
                                 ::testing::ValuesIn(fusingParamsSet)),
                         PoolingLayerCPUTest::getTestCaseName);
} // namespace
} // namespace Pooling
} // namespace CPULayerTestsDefinitions

View File

@@ -1,704 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ov_models/builders.hpp"
#include "test_utils/cpu_test_utils.hpp"
#include "test_utils/fusing_test_utils.hpp"
#include "shared_test_classes/single_layer/pooling.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace ov::test;
using namespace CPUTestUtils;
namespace CPULayerTestsDefinitions {
// Parameter tuple for PoolingLayerCPUTest:
// {pooling params, input shape, in/out precision, int8 flag, CPU config, fusing config}.
using poolLayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecificParams,
                                             InputShape,
                                             ElementType,
                                             bool,
                                             CPUSpecificParams,
                                             fusingSpecificParams>;
// Parameter tuple for MaxPoolingV8LayerCPUTest (no int8/fusing dimensions):
// {MaxPool-8 params, input shape, in/out precision, CPU config}.
using maxPoolV8LayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::maxPoolV8SpecificParams,
                                                  InputShape,
                                                  ElementType,
                                                  CPUSpecificParams>;
// Parameterized SLT fixture for MaxPool/AvgPool on the CPU plugin.
// Builds a single-pooling ngraph function (optionally preceded by FakeQuantize for
// the int8 path), runs it, and lets CpuTestWithFusing attach/validate post-ops.
class PoolingLayerCPUTest : public testing::WithParamInterface<poolLayerCpuTestParamsSet>,
                            virtual public SubgraphBaseTest, public CpuTestWithFusing {
public:
    // Encodes every tuple element into the gtest case name
    // (shapes, precision, pool type, kernel/stride/pads, rounding, auto-pad,
    // int8 flag, CPU config, fusing config).
    static std::string getTestCaseName(const testing::TestParamInfo<poolLayerCpuTestParamsSet>& obj) {
        LayerTestsDefinitions::poolSpecificParams basicParamsSet;
        InputShape inputShapes;
        ElementType inPrc;
        bool isInt8;
        CPUSpecificParams cpuParams;
        fusingSpecificParams fusingParams;
        std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = obj.param;
        ngraph::helpers::PoolingTypes poolType;
        std::vector<size_t> kernel, stride;
        std::vector<size_t> padBegin, padEnd;
        ngraph::op::PadType padType;
        ngraph::op::RoundingType roundingType;
        bool excludePad;
        std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;
        std::ostringstream results;
        results << "IS=(";
        results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_";
        results << "TS=";
        for (const auto& shape : inputShapes.second) {
            results << ov::test::utils::vec2str(shape) << "_";
        }
        results << "Prc=" << inPrc << "_";
        switch (poolType) {
            case ngraph::helpers::PoolingTypes::MAX:
                results << "MaxPool_";
                break;
            case ngraph::helpers::PoolingTypes::AVG:
                results << "AvgPool_";
                // ExcludePad is only meaningful for average pooling.
                results << "ExcludePad=" << excludePad << "_";
                break;
        }
        results << "K" << ov::test::utils::vec2str(kernel) << "_";
        results << "S" << ov::test::utils::vec2str(stride) << "_";
        results << "PB" << ov::test::utils::vec2str(padBegin) << "_";
        results << "PE" << ov::test::utils::vec2str(padEnd) << "_";
        results << "Rounding=" << roundingType << "_";
        results << "AutoPad=" << padType << "_";
        results << "INT8=" << isInt8 << "_";
        results << CPUTestsBase::getTestCaseName(cpuParams);
        results << CpuTestWithFusing::getTestCaseName(fusingParams);
        return results.str();
    }

protected:
    // Unpacks the parameter tuple, derives the expected primitive type string,
    // and builds the test function: Parameter -> [FakeQuantize] -> Pooling.
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;
        LayerTestsDefinitions::poolSpecificParams basicParamsSet;
        InputShape inputShapes;
        ElementType inPrc;
        bool isInt8;
        CPUSpecificParams cpuParams;
        fusingSpecificParams fusingParams;
        std::tie(basicParamsSet, inputShapes, inPrc, isInt8, cpuParams, fusingParams) = this->GetParam();
        ngraph::helpers::PoolingTypes poolType;
        std::vector<size_t> kernel, stride;
        std::vector<size_t> padBegin, padEnd;
        ngraph::op::PadType padType;
        ngraph::op::RoundingType roundingType;
        bool excludePad;
        std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;
        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
        std::tie(postOpMgrPtr, fusedOps) = fusingParams;
        // An empty selectedType in the CPU config means "whatever primitive this
        // platform picks by default".
        if (selectedType.empty()) {
            selectedType = getPrimitiveType();
        }
        // Expected primitive name: "<type>_I8" for the int8 path, otherwise
        // "<type>_<precision>" — must match what CheckPluginRelatedResults sees.
        if (isInt8)
            selectedType = selectedType + "_I8";
        else
            selectedType = makeSelectedTypeStr(selectedType, inPrc);
        init_input_shapes({inputShapes});
        ov::ParameterVector params;
        for (auto&& shape : inputDynamicShapes) {
            params.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
        }
        std::shared_ptr<ngraph::Node> poolInput = params[0];
        // int8 path: insert a 256-level FakeQuantize (per-tensor: all-ones shape)
        // in front of the pooling so the plugin selects the int8 primitive.
        if (isInt8) {
            ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1);
            poolInput = ngraph::builder::makeFakeQuantize(poolInput, inPrc, 256, newShape);
        }
        std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makePooling(poolInput,
                                                                             stride,
                                                                             padBegin,
                                                                             padEnd,
                                                                             kernel,
                                                                             roundingType,
                                                                             padType,
                                                                             excludePad,
                                                                             poolType);
        // makeNgraphFunction (CpuTestWithFusing) appends the configured post-ops.
        function = makeNgraphFunction(inPrc, params, pooling, "PoolingCPU");
    }
};
// Parameterized SLT fixture for MaxPool-8 (pooling with dilations + indices output)
// on the CPU plugin. No int8/fusing dimensions — only the data output is checked
// via the generic run()/CheckPluginRelatedResults flow.
class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface<maxPoolV8LayerCpuTestParamsSet>,
                                 virtual public SubgraphBaseTest, public CPUTestsBase {
public:
    // Encodes shapes, precision, kernel/stride/dilation/pads, rounding, auto-pad
    // and the CPU config into the gtest case name.
    static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerCpuTestParamsSet>& obj) {
        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
        InputShape inputShapes;
        ElementType inPrc;
        CPUSpecificParams cpuParams;
        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param;
        std::vector<size_t> kernel, stride, dilation;
        std::vector<size_t> padBegin, padEnd;
        ngraph::op::PadType padType;
        ngraph::op::RoundingType roundingType;
        ngraph::element::Type indexElementType;
        int64_t axis;
        std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet;
        std::ostringstream results;
        results << "IS=(";
        results << ov::test::utils::partialShape2str({inputShapes.first}) << ")_";
        results << "TS=";
        for (const auto& shape : inputShapes.second) {
            results << ov::test::utils::vec2str(shape) << "_";
        }
        results << "Prc=" << inPrc << "_";
        results << "MaxPool_";
        results << "K" << ov::test::utils::vec2str(kernel) << "_";
        results << "S" << ov::test::utils::vec2str(stride) << "_";
        results << "D" << ov::test::utils::vec2str(dilation) << "_";
        results << "PB" << ov::test::utils::vec2str(padBegin) << "_";
        results << "PE" << ov::test::utils::vec2str(padEnd) << "_";
        results << "Rounding=" << roundingType << "_";
        results << "AutoPad=" << padType << "_";
        results << CPUTestsBase::getTestCaseName(cpuParams);
        return results.str();
    }

protected:
    // Builds Parameter -> MaxPool-8 and exposes only output(0) (values, not indices)
    // as the function result.
    void SetUp() override {
        targetDevice = ov::test::utils::DEVICE_CPU;
        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
        InputShape inputShapes;
        ElementType inPrc;
        CPUSpecificParams cpuParams;
        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam();
        std::vector<size_t> kernel, stride, dilation;
        std::vector<size_t> padBegin, padEnd;
        ngraph::op::PadType padType;
        ngraph::op::RoundingType roundingType;
        ngraph::element::Type indexElementType;
        int64_t axis;
        std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet;
        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
        // Empty selectedType = accept the platform's default primitive.
        if (selectedType.empty()) {
            selectedType = getPrimitiveType();
        }
        selectedType = makeSelectedTypeStr(selectedType, inPrc);
        init_input_shapes({inputShapes});
        ov::ParameterVector params;
        for (auto&& shape : inputDynamicShapes) {
            params.push_back(std::make_shared<ov::op::v0::Parameter>(inPrc, shape));
        }
        std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makeMaxPoolingV8(params[0], stride, dilation, padBegin, padEnd,
                                                                                  kernel, roundingType, padType,
                                                                                  indexElementType, axis);
        // Attach the expected CPU-plugin info (formats/primitive) to the node.
        pooling->get_rt_info() = getCPUInfo();
        // Only the values output is validated; the indices output is dropped.
        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling->output(0))};
        function = std::make_shared<ngraph::Function>(results, params, "MaxPooling");
    }
};
// Run the compiled model against the reference and verify the plugin picked
// the expected "Pooling" primitive/format for each fixture.
TEST_P(PoolingLayerCPUTest, CompareWithRefs) {
    run();
    CheckPluginRelatedResults(compiledModel, "Pooling");
}

TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) {
    run();
    CheckPluginRelatedResults(compiledModel, "Pooling");
}
namespace {
// CPU configs: {in formats, out formats, primitive filters, expected primitive}.
const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"};
const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"};
const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"};
const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};
const std::vector<CPUSpecificParams> vecCpuConfigs = {ref, sse42, avx, avx512};
// bf16 deliberately disabled here (kept as a reminder in the comment).
const std::vector<ElementType> inpOutPrecision = {ElementType::f32/*, ElementType::bf16*/};
// 3D (N, C, W) shapes: static cases plus fully-dynamic and bounded-dynamic cases.
const std::vector<InputShape> inputShapes3D = {
        { {}, {{3, 4, 64}} },
        { {}, {{2, 8, 12}} },
        { {}, {{1, 16, 12}} },
        { {}, {{1, 21, 4}} },
        { {}, {{1, 32, 8}} },
        {
            // dynamic
            {-1, -1, -1},
            // target
            {
                {1, 32, 8},
                {1, 21, 4},
                {2, 8, 12}
            }
        },
        {
            // dynamic
            {{1, 5}, {4, 32}, {1, 64}},
            // target
            {
                {3, 4, 64},
                {1, 16, 12},
                {1, 32, 8}
            }
        }
};
// 4D (N, C, H, W) shapes; the {1, 96, 125, 125} target stresses larger spatial dims.
const std::vector<InputShape> inputShapes4D = {
        { {}, {{3, 4, 64, 64}} },
        { {}, {{2, 8, 8, 12}} },
        { {}, {{1, 16, 16, 12}} },
        { {}, {{1, 21, 8, 4}} },
        { {}, {{1, 32, 8, 8}} },
        {
            // dynamic
            {-1, -1, -1, -1},
            // target
            {
                {1, 32, 8, 8},
                {1, 21, 8, 4},
                {2, 8, 8, 12},
                {1, 96, 125, 125}
            }
        },
        {
            // dynamic
            {{1, 5}, {4, 32}, {1, 64}, {1, 64}},
            // target
            {
                {3, 4, 64, 64},
                {1, 16, 16, 12},
                {1, 32, 8, 8}
            }
        },
        {
            // dynamic
            {{1, 10}, 16, 8, 8},
            // target
            {
                {1, 16, 8, 8},
                {2, 16, 8, 8},
            }
        }
};
// 5D (N, C, D, H, W) shapes, same static/dynamic mix as above.
const std::vector<InputShape> inputShapes5D = {
        { {}, {{1, 4, 16, 16, 16}} },
        { {}, {{2, 8, 8, 8, 8}} },
        { {}, {{2, 16, 12, 16, 20}} },
        { {}, {{1, 19, 16, 20, 8}} },
        { {}, {{1, 32, 16, 8, 12}} },
        {
            // dynamic
            {-1, -1, -1, -1, -1},
            // target
            {
                {2, 8, 8, 8, 8},
                {1, 19, 16, 20, 8},
                {1, 4, 16, 16, 16}
            }
        },
        {
            // dynamic
            {{1, 5}, {4, 32}, {1, 64}, {1, 64}, {1, 25}},
            // target
            {
                {1, 4, 16, 16, 16},
                {1, 32, 16, 8, 12},
                {3, 16, 4, 8, 3}
            }
        }
};

/* ============= Pooling (1D) ============= */
// poolSpecificParams fields: {type, kernel, strides, pads_begin, pads_end,
// rounding, auto-pad, exclude-pad}.
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax3D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
};
// Non-excluded padding with explicit pads: expected to fall back to `ref`.
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2}, {2}, {2}, {2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
// 1D pooling (3D tensor) suites over all device-supported CPU configs.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_3D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMax3D),
                                 ::testing::ValuesIn(inputShapes3D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg3D),
                                 ::testing::ValuesIn(inputShapes3D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

// Ref-only AvgPool cases pinned to the `ref` config.
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg3D_RefOnly),
                                 ::testing::ValuesIn(inputShapes3D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::Values(ref),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

/* ============= Pooling (2D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
// MaxPool-8 fields: {kernel, strides, dilations, pads_begin, pads_end,
// index type, axis, rounding, auto-pad}. Dilation {1,1} = optimized path.
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
};
// Dilation != 1 — these run on the reference implementation.
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D_ref = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
};
// AvgPool 4D cases covering SAME_LOWER/SAME_UPPER/EXPLICIT pads with and
// without exclude-pad.
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};

// 2D pooling (4D tensor) suites.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMax4D),
                                 ::testing::ValuesIn(inputShapes4D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV84D),
                                 ::testing::ValuesIn(inputShapes4D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV84D_ref),
                                 ::testing::ValuesIn(inputShapes4D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(ref)),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg4D),
                                 ::testing::ValuesIn(inputShapes4D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg4D_RefOnly),
                                 ::testing::ValuesIn(inputShapes4D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::Values(ref),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

// Large-kernel AvgPool (65x65, VALID, FLOOR) on a dedicated large-spatial shape set.
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_Large = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0},
                            ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true },
};
const std::vector<InputShape> inputShapes4D_Large = {
        {
            // dynamic
            {-1, -1, -1, -1},
            // target
            {
                {1, 16, 65, 65},
                {1, 8, 130, 130},
                {1, 16, 65, 65}
            }
        },
};
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_Large, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg4D_Large),
                                 ::testing::ValuesIn(inputShapes4D_Large),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
/* ============= Pooling (3D) ============= */
// 3D-spatial (5D tensor) parameter sets; same field layout as the 4D sets above.
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
// MaxPool-8, dilation {1,1,1} — optimized path.
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
};
// MaxPool-8 with non-trivial dilations — reference path.
const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D_ref = {
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
                                                        ngraph::element::Type_t::i32, 0,
                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D_RefOnly = {
        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};

// 3D-spatial (5D tensor) suites.
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMax5D),
                                 ::testing::ValuesIn(inputShapes5D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV85D),
                                 ::testing::ValuesIn(inputShapes5D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsMaxV85D_ref),
                                 ::testing::ValuesIn(inputShapes5D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(ref)),
                         MaxPoolingV8LayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D),
                                 ::testing::ValuesIn(inputShapes5D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs)),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_NotOptimized, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D_RefOnly),
                                 ::testing::ValuesIn(inputShapes5D),
                                 ::testing::ValuesIn(inpOutPrecision),
                                 ::testing::Values(false),
                                 ::testing::Values(ref),
                                 ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
/* === Fusing === */
// Channels-last configs (nhwc/ndhwc) for the int8 + post-op-fusing suites.
const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"};
const auto avx512_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx512"}, "jit_avx512"};

const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"};
const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"};

const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"};
const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"};

const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc};

// Post-ops attached via CpuTestWithFusing: none, per-tensor FQ, per-channel FQ.
std::vector<fusingSpecificParams> fusingParamsSet {
    emptyFusingSpec,
    fusingFakeQuantizePerTensor,
    fusingFakeQuantizePerChannel,
};

// 4D int8 shapes: static cases plus fixed-channel and bounded dynamic cases.
const std::vector<InputShape> inputShapes4D_int8 = {
        { {}, {{3, 4, 64, 64}} },
        { {}, {{2, 8, 8, 12}} },
        { {}, {{1, 16, 16, 12}} },
        { {}, {{1, 21, 8, 4}} },
        { {}, {{1, 32, 8, 8}} },
        {
            // dynamic
            {-1, 32, -1, -1},
            // target
            {
                {1, 32, 8, 8},
                {1, 32, 8, 4},
                {2, 32, 8, 12},
                {1, 32, 8, 8}
            }
        },
        {
            // dynamic
            {{1, 5}, 16, {1, 64}, {1, 64}},
            // target
            {
                {3, 16, 32, 32},
                {1, 16, 16, 12},
                {1, 16, 8, 8},
                {3, 16, 32, 32},
            }
        }
};

// int8 AvgPool (fixture inserts FakeQuantize before pooling) + fusing variants.
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D_I8, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg4D),
                                 ::testing::ValuesIn(inputShapes4D_int8),
                                 ::testing::Values(ElementType::f32),
                                 ::testing::Values(true),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_4D)),
                                 ::testing::ValuesIn(fusingParamsSet)),
                         PoolingLayerCPUTest::getTestCaseName);

// 5D int8 shapes, same structure as the 4D set above.
const std::vector<InputShape> inputShapes5D_int8 = {
        { {}, {{1, 4, 16, 16, 16}} },
        { {}, {{2, 8, 8, 8, 8}} },
        { {}, {{2, 16, 12, 16, 20}} },
        { {}, {{1, 19, 16, 20, 8}} },
        { {}, {{1, 32, 16, 8, 12}} },
        {
            // dynamic
            {-1, 32, -1, -1, -1},
            // target
            {
                {2, 32, 8, 8, 8},
                {1, 32, 16, 20, 8},
                {1, 32, 16, 16, 16},
                {2, 32, 8, 8, 8}
            }
        },
        {
            // dynamic
            {{1, 5}, 16, {1, 64}, {1, 64}, {1, 25}},
            // target
            {
                {1, 16, 16, 16, 16},
                {1, 16, 16, 8, 12},
                {2, 16, 8, 8, 8},
                {1, 16, 16, 16, 16},
            }
        }
};

INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_I8, PoolingLayerCPUTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(paramsAvg5D),
                                 ::testing::ValuesIn(inputShapes5D_int8),
                                 ::testing::Values(ElementType::f32),
                                 ::testing::Values(true),
                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_5D)),
                                 ::testing::ValuesIn(fusingParamsSet)),
                         PoolingLayerCPUTest::getTestCaseName);
} // namespace
} // namespace CPULayerTestsDefinitions