[CPU] Added MaxPool-8 support (#8788)
commit ad668d6ac6 (parent 9df738ebb8)
@@ -94,12 +94,12 @@ MKLDNNDescriptor::operator std::shared_ptr<mkldnn::lrn_forward::desc>() {
     return typeDesc->getPtr();
 }
 
-MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_forward::desc> desc) {
-    this->desc.reset(new DescFwdImpl<mkldnn::pooling_forward::desc>(desc));
+MKLDNNDescriptor::MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_v2_forward::desc> desc) {
+    this->desc.reset(new DescFwdImpl<mkldnn::pooling_v2_forward::desc>(desc));
 }
 
-MKLDNNDescriptor::operator std::shared_ptr<mkldnn::pooling_forward::desc>() {
-    auto typeDesc = std::dynamic_pointer_cast<DescFwdImpl<mkldnn::pooling_forward::desc>>(desc);
+MKLDNNDescriptor::operator std::shared_ptr<mkldnn::pooling_v2_forward::desc>() {
+    auto typeDesc = std::dynamic_pointer_cast<DescFwdImpl<mkldnn::pooling_v2_forward::desc>>(desc);
     if (typeDesc == nullptr) {
         IE_THROW() << "Cannot cast descriptor!";
     }
@@ -28,8 +28,8 @@ public:
     explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::lrn_forward::desc> desc);
     operator std::shared_ptr<mkldnn::lrn_forward::desc>();
 
-    explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_forward::desc> desc);
-    operator std::shared_ptr<mkldnn::pooling_forward::desc>();
+    explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::pooling_v2_forward::desc> desc);
+    operator std::shared_ptr<mkldnn::pooling_v2_forward::desc>();
 
     explicit MKLDNNDescriptor(std::shared_ptr<mkldnn::softmax_forward::desc> desc);
     operator std::shared_ptr<mkldnn::softmax_forward::desc>();
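Note: oneDNN's pooling_v2 primitive is what makes the new dilation attribute expressible; the v1 pooling descriptor has no dilation parameter at all. A minimal sketch of the difference, assuming the oneDNN 2.x C++ API this revision builds against (the helper name is ours, not the plugin's):

#include <mkldnn.hpp>

// pooling_v2_forward::desc takes one extra argument -- the dilation dims --
// between the kernel and the left padding; everything else matches v1.
mkldnn::pooling_v2_forward::desc make_dilated_max_pool_desc(
        const mkldnn::memory::desc& src, const mkldnn::memory::desc& dst,
        const mkldnn::memory::dims& strides, const mkldnn::memory::dims& kernel,
        const mkldnn::memory::dims& dilation,
        const mkldnn::memory::dims& pad_l, const mkldnn::memory::dims& pad_r) {
    // v1 equivalent (no dilation):
    // mkldnn::pooling_forward::desc(prop_kind::forward_scoring, alg,
    //                               src, dst, strides, kernel, pad_l, pad_r);
    return mkldnn::pooling_v2_forward::desc(mkldnn::prop_kind::forward_scoring,
                                            mkldnn::algorithm::pooling_max,
                                            src, dst, strides, kernel,
                                            dilation, pad_l, pad_r);
}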
@@ -20,10 +20,15 @@ using namespace mkldnn;
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
 
-bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
+bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ngraph::as_type_ptr<const ngraph::op::v1::MaxPool>(op) && !ngraph::as_type_ptr<const ngraph::op::v1::AvgPool>(op)) {
-            errorMessage = "Only opset1 MaxPool and AvgPool operations are supported";
+        if (ov::is_type<const ov::op::v8::MaxPool>(op)) {
+            if (!op->get_output_target_inputs(1).empty()) {
+                errorMessage = "MaxPool from opset8 is supported only with one output";
+                return false;
+            }
+        } else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op)) {
+            errorMessage = "MaxPool and AvgPool from opset1 and MaxPool from opset8 are supported";
             return false;
         }
     } catch (...) {
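Note: opset8 MaxPool has two outputs -- output 0 carries the pooled values, output 1 the indices of the selected elements -- and this commit only implements the first. The guard above therefore rejects graphs in which anything consumes output 1. A minimal sketch of that check, assuming the ov::Node aliases available in this revision:

#include <memory>
#include <ngraph/node.hpp>

// True when no downstream input is connected to the indices output.
bool indices_output_unused(const std::shared_ptr<const ov::Node>& op) {
    return op->get_output_target_inputs(1).empty();
}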
@@ -32,48 +37,52 @@ bool MKLDNNPoolingNode::isSupportedOperation(const std::shared_ptr<const ngraph:
     return true;
 }
 
-MKLDNNPoolingNode::MKLDNNPoolingNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
+MKLDNNPoolingNode::MKLDNNPoolingNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
         : MKLDNNNode(op, eng, cache) {
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
         IE_THROW(NotImplemented) << errorMessage;
     }
 
-    auto maxPoolOp = ngraph::as_type_ptr<ngraph::op::v1::MaxPool>(op);
-    auto avgPoolOp = ngraph::as_type_ptr<ngraph::op::v1::AvgPool>(op);
-    if (maxPoolOp) {
+    auto get_attributes = [](std::vector<ptrdiff_t>& internal_attribute, const std::vector<size_t> external_attribute) {
+        for (size_t i = 0; i < external_attribute.size(); i++) {
+            internal_attribute.push_back(static_cast<ptrdiff_t>(external_attribute[i]));
+        }
+    };
+
+    if (auto maxPoolOp_v8 = ov::as_type_ptr<const ov::op::v8::MaxPool>(op)) {
+        isMaxPool8 = true;
         algorithm = PoolingMax;
         exclude_pad = false;
 
-        for (int i = 0; i < maxPoolOp->get_strides().size(); i++) {
-            stride.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_strides()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_kernel().size(); i++) {
-            kernel.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_kernel()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_pads_begin().size(); i++) {
-            data_pad_begin.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_pads_begin()[i]));
-        }
-        for (int i = 0; i < maxPoolOp->get_pads_end().size(); i++) {
-            data_pad_end.push_back(static_cast<ptrdiff_t>(maxPoolOp->get_pads_end()[i]));
-        }
-        auto_pad = (maxPoolOp->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp->get_auto_pad() == ov::op::PadType::SAME_UPPER);
-    } else if (avgPoolOp) {
+        get_attributes(dilation, maxPoolOp_v8->get_dilations());
+        get_attributes(stride, maxPoolOp_v8->get_strides());
+        get_attributes(kernel, maxPoolOp_v8->get_kernel());
+        get_attributes(data_pad_begin, maxPoolOp_v8->get_pads_begin());
+        get_attributes(data_pad_end, maxPoolOp_v8->get_pads_end());
+
+        auto_pad = (maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_UPPER);
+    } else if (auto maxPoolOp_v1 = ov::as_type_ptr<const ov::op::v1::MaxPool>(op)) {
+        algorithm = PoolingMax;
+        exclude_pad = false;
+
+        get_attributes(stride, maxPoolOp_v1->get_strides());
+        get_attributes(kernel, maxPoolOp_v1->get_kernel());
+        get_attributes(data_pad_begin, maxPoolOp_v1->get_pads_begin());
+        get_attributes(data_pad_end, maxPoolOp_v1->get_pads_end());
+        dilation.resize(kernel.size(), 1);
+
+        auto_pad = (maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_UPPER);
+    } else if (auto avgPoolOp = ov::as_type_ptr<const ov::op::v1::AvgPool>(op)) {
         algorithm = PoolingAvg;
         exclude_pad = avgPoolOp->get_exclude_pad();
 
-        for (int i = 0; i < avgPoolOp->get_strides().size(); i++) {
-            stride.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_strides()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_kernel().size(); i++) {
-            kernel.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_kernel()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_pads_begin().size(); i++) {
-            data_pad_begin.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_pads_begin()[i]));
-        }
-        for (int i = 0; i < avgPoolOp->get_pads_end().size(); i++) {
-            data_pad_end.push_back(static_cast<ptrdiff_t>(avgPoolOp->get_pads_end()[i]));
-        }
+        get_attributes(stride, avgPoolOp->get_strides());
+        get_attributes(kernel, avgPoolOp->get_kernel());
+        get_attributes(data_pad_begin, avgPoolOp->get_pads_begin());
+        get_attributes(data_pad_end, avgPoolOp->get_pads_end());
+        dilation.resize(kernel.size(), 1);
+
         auto_pad = (avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_LOWER || avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_UPPER);
     }
 }
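Note: the get_attributes lambda replaces four hand-written copy loops per branch; it is an element-wise copy that only changes the integer signedness (size_t to ptrdiff_t). For opset1 ops, which carry no dilation attribute, dilation.resize(kernel.size(), 1) fills in the dense-window default. An equivalent standalone formulation of the copy, as a sketch:

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>

void copy_attribute(std::vector<ptrdiff_t>& dst, const std::vector<size_t>& src) {
    std::transform(src.begin(), src.end(), std::back_inserter(dst),
                   [](size_t v) { return static_cast<ptrdiff_t>(v); });
}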
@@ -94,20 +103,23 @@ std::vector<memory::format_tag> MKLDNNPoolingNode::getAvailableFormatsForDims(co
     return {memory::format_tag::any};
 }
 
-void MKLDNNPoolingNode::initEffectivePad(const Shape &inShape, const Shape &outShape) {
+void MKLDNNPoolingNode::initEffectiveAttributes(const Shape &inShape, const Shape &outShape) {
     effective_pad_begin = data_pad_begin;
     effective_pad_end.resize(data_pad_end.size());
+    effective_dilation.resize(dilation.size(), 0);
 
     const auto &inDims = inShape.getStaticDims();
     const auto &outDims = outShape.getStaticDims();
 
     for (int i = 0; i < effective_pad_end.size(); i++) {
         int krn = kernel[i];
+        int dil = dilation[i];
         int src = inDims[2 + i];
         int dst = outDims[2 + i];
 
-        int calc_dst = (src - krn + data_pad_begin[i]) / stride[i] + 1;
+        int calc_dst = (src - (1 + (krn - 1) * dil) + data_pad_begin[i]) / stride[i] + 1;
         effective_pad_end[i] = (dst - calc_dst) * stride[i];
+        effective_dilation[i] = dil - 1;
     }
 }
 
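Note: with dilation d, a kernel of size k touches a span of 1 + (k - 1) * d input elements, which is why the expected output extent changes from (src - k + pad) / stride + 1 to (src - (1 + (k - 1) * d) + pad) / stride + 1; effective_pad_end then absorbs whatever extra padding is needed to reach the real output dim. oneDNN additionally counts dilation from zero (0 means a dense window), hence the dil - 1 stored in effective_dilation. A worked example of the arithmetic:

#include <cassert>

int main() {
    // src = 10, kernel = 3, dilation = 2, stride = 1, no padding:
    int src = 10, krn = 3, dil = 2, stride = 1, pad_begin = 0;
    int span = 1 + (krn - 1) * dil;                        // window covers 5 inputs
    int calc_dst = (src - span + pad_begin) / stride + 1;  // 6 output elements
    assert(span == 5 && calc_dst == 6);
    int onednn_dilation = dil - 1;                         // 1, not 2
    assert(onednn_dilation == 1);
    return 0;
}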
@@ -120,8 +132,8 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     if (getChildEdges().empty())
         IE_THROW() << "Incorrect number of output edges for layer " << getName();
 
-    inputPrecision = getOriginalInputPrecisionAtPort(0);
-    outputPrecision = getOriginalOutputPrecisionAtPort(0);
+    InferenceEngine::Precision inputPrecision = getOriginalInputPrecisionAtPort(0);
+    InferenceEngine::Precision outputPrecision = getOriginalOutputPrecisionAtPort(0);
 
     // WA: LPT transformation has WA which allows average pooling has I8/U8 output precision instead of FP32,
     // so we explicitly set output precision as FP32
@@ -151,8 +163,8 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     if ((inputRank < 3) || (inputRank > 5))
         IE_THROW() << "Pooling layer. Unsupported mode. Only 3D, 4D and 5D blobs are supported as input.";
 
-    initEffectivePad(MemoryDescUtils::makeDummyShape(parentShape),
+    initEffectiveAttributes(MemoryDescUtils::makeDummyShape(parentShape),
                      MemoryDescUtils::makeDummyShape(childShape));
 
     if (inputPrecision == Precision::I8 || inputPrecision == Precision::U8) {
         // We have to extend i8i8_pooling_fwd_t from oneDNN to support BF16 output data type
@@ -185,7 +197,7 @@ void MKLDNNPoolingNode::getSupportedDescriptors() {
     }
 }
 
-std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::getPaddingFromNode(std::shared_ptr<ngraph::Node> node) const {
+std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::getPaddingFromNode(std::shared_ptr<ov::Node> node) const {
     const auto convertPadding = [](const VectorDims &newPads) {
         std::vector<ptrdiff_t> pads(newPads.size());
         for (int i = 0; i < newPads.size(); i++) {
@@ -195,12 +207,16 @@ std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> MKLDNNPoolingNode::get
     };
 
     VectorDims padsBegin, padsEnd;
-    if (getAlgorithm() == PoolingMax) {
-        const auto pool = ngraph::as_type_ptr<const ngraph::op::v1::MaxPool>(opToShapeInfer);
+    if (isMaxPool8) {
+        const auto pool = ov::as_type_ptr<const ov::op::v8::MaxPool>(opToShapeInfer);
+        padsBegin = pool->get_pads_begin();
+        padsEnd = pool->get_pads_end();
+    } else if (getAlgorithm() == PoolingMax) {
+        const auto pool = ov::as_type_ptr<const ov::op::v1::MaxPool>(opToShapeInfer);
         padsBegin = pool->get_pads_begin();
         padsEnd = pool->get_pads_end();
     } else if (getAlgorithm() == PoolingAvg) {
-        const auto pool = ngraph::as_type_ptr<const ngraph::op::v1::AvgPool>(opToShapeInfer);
+        const auto pool = ov::as_type_ptr<const ov::op::v1::AvgPool>(opToShapeInfer);
         padsBegin = pool->get_pads_begin();
         padsEnd = pool->get_pads_end();
     }
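Note: the pads are re-read from the shape-inferred op here because, under SAME_UPPER/SAME_LOWER auto padding, the actual pad values depend on the concrete input shape and are only known once shapes are inferred. For reference, a one-axis illustration of how SAME-style total padding is conventionally derived (a sketch, not code from this commit):

#include <algorithm>

int same_total_padding(int src, int stride, int effective_kernel /* 1 + (k - 1) * d */) {
    int out = (src + stride - 1) / stride;  // ceil(src / stride)
    return std::max(0, (out - 1) * stride + effective_kernel - src);
}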
@@ -231,15 +247,15 @@ void MKLDNNPoolingNode::prepareParams() {
         if (auto_pad) {
             std::tie(data_pad_begin, data_pad_end) = getPaddingFromNode(opToShapeInfer);
         }
-        initEffectivePad(inDesc->getShape(), outDesc->getShape());
+        initEffectiveAttributes(inDesc->getShape(), outDesc->getShape());
     }
 
     mkldnn::algorithm alg = getPoolingAlgorithm();
     MKLDNNDescriptor desc{createDescriptorInternal(in_candidate, out_candidate, alg)};
-    pooling_forward::primitive_desc prim_desc;
+    pooling_v2_forward::primitive_desc prim_desc;
     primitive_desc_iterator itpd = desc.createPrimitiveDescriptorIterator(getEngine(), *attr);
 
     while (static_cast<bool>(itpd)) {
         impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
 
         if (impl_type == selected_pd->getImplementationType()) {
@@ -250,7 +266,7 @@ void MKLDNNPoolingNode::prepareParams() {
         IE_THROW() << "Primitive descriptor was not found for node " << getName() << ".";
     }
 
-    prim.reset(new pooling_forward(prim_desc));
+    prim.reset(new pooling_v2_forward(prim_desc));
 
     auto src = getParentEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
     auto dst = getChildEdgesAtPort(0)[0]->getMemoryPtr()->GetPrimitive();
@@ -296,9 +312,9 @@ mkldnn::algorithm MKLDNNPoolingNode::getPoolingAlgorithm() const {
     }
 }
 
-std::shared_ptr<pooling_forward::desc> MKLDNNPoolingNode::createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
+std::shared_ptr<pooling_v2_forward::desc> MKLDNNPoolingNode::createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
                                                                                    const mkldnn::memory::desc& out_candidate,
                                                                                    const mkldnn::algorithm alg) const {
     if (alg == mkldnn::algorithm::undef) {
         IE_THROW() << "Unsupported pooling type";
     }
@@ -306,13 +322,14 @@ std::shared_ptr<pooling_forward::desc> MKLDNNPoolingNode::createDescriptorIntern
     auto convert = [] (std::vector<ptrdiff_t> orig_dims) {
         return memory::dims(orig_dims.begin(), orig_dims.end());
     };
-    std::shared_ptr<pooling_forward::desc> desc_ptr(
-            new pooling_forward::desc(prop_kind::forward_scoring, alg,
+    std::shared_ptr<pooling_v2_forward::desc> desc_ptr(
+            new pooling_v2_forward::desc(prop_kind::forward_scoring, alg,
                                       in_candidate, out_candidate,
                                       convert(stride),
                                       convert(kernel),
+                                      convert(effective_dilation),
                                       convert(effective_pad_begin),
                                       convert(effective_pad_end)));
 
     if (alg == mkldnn::algorithm::pooling_avg_include_padding) {
         // In case of AVG including paddings the norm coeff should be calculated
@@ -343,14 +360,12 @@ void MKLDNNPoolingNode::createDescriptor(const std::vector<MemoryDescPtr> &input
         if (auto_pad) {
             std::tie(data_pad_begin, data_pad_end) = getPaddingFromNode(opToShapeInfer);
         }
-        initEffectivePad(inDesc->getShape(), outDesc->getShape());
+        initEffectiveAttributes(inDesc->getShape(), outDesc->getShape());
     }
     auto dnnlOutDesc = MemoryDescUtils::convertToDnnlBlockedMemoryDesc(*outDesc);
     auto out_candidate = dnnlOutDesc.getDnnlDesc();
 
-    mkldnn::algorithm alg = getPoolingAlgorithm();
-    auto desc_ptr = createDescriptorInternal(in_candidate, out_candidate, alg);
+    auto desc_ptr = createDescriptorInternal(in_candidate, out_candidate, getPoolingAlgorithm());
 
     descs.emplace_back(desc_ptr);
 }
@@ -383,6 +398,18 @@ void MKLDNNPoolingNode::initSupportedPrimitiveDescriptors() {
 
             config.outConfs.push_back(dataConfig);
         }
 
+        // CPU plugin doesn't support second output of MaxPool-8, but anyway we should have out config for second port as stub
+        if (isMaxPool8) {
+            auto& creatorsMap = BlockedDescCreator::getCommonCreators();
+            PortConfig dataConfig;
+            dataConfig.inPlace = -1;
+            dataConfig.constant = false;
+            dataConfig.desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(config.outConfs.front().desc->getPrecision(), getOutputShapeAtPort(1));
+
+            config.outConfs.push_back(dataConfig);
+        }
+
         impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
 
         supportedPrimitiveDescriptors.emplace_back(config, impl_type);
@@ -434,6 +461,18 @@ void MKLDNNPoolingNode::initDescriptor(const NodeConfig& config) {
             dataConfig.desc = getDstMemDesc(itpd, i);
             cfg.outConfs.push_back(dataConfig);
         }
 
+        // CPU plugin doesn't support second output of MaxPool-8, but anyway we should have out config for second port as stub
+        if (isMaxPool8) {
+            auto& creatorsMap = BlockedDescCreator::getCommonCreators();
+            PortConfig dataConfig;
+            dataConfig.inPlace = -1;
+            dataConfig.constant = false;
+            dataConfig.desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(cfg.outConfs.front().desc->getPrecision(), getOutputShapeAtPort(1));
+
+            cfg.outConfs.push_back(dataConfig);
+        }
+
         impl_desc_type impl_type = parse_impl_name(itpd.impl_info_str());
         if (selected_count == selectedPrimitiveDescriptorIndex) {
             if (impl_type != selectedPD->getImplementationType()) {
@@ -14,7 +14,7 @@ namespace MKLDNNPlugin {
 
 class MKLDNNPoolingNode : public MKLDNNNode {
 public:
-    MKLDNNPoolingNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
+    MKLDNNPoolingNode(const std::shared_ptr<ov::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
 
     void createDescriptor(const std::vector<MemoryDescPtr>& inputDesc,
                           const std::vector<MemoryDescPtr>& outputDesc) override;
@@ -31,7 +31,7 @@ public:
     void prepareParams() override;
     void executeDynamicImpl(mkldnn::stream strm) override { execute(strm); }
 
-    static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
+    static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 
 protected:
     AttrPtr initPrimitiveAttr() const override;
@@ -39,17 +39,19 @@ protected:
 private:
     void setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false) const;
 
-    std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> getPaddingFromNode(std::shared_ptr<ngraph::Node> node) const;
-    void initEffectivePad(const Shape &inDims, const Shape &outDims);
+    std::pair<std::vector<ptrdiff_t>, std::vector<ptrdiff_t>> getPaddingFromNode(std::shared_ptr<ov::Node> node) const;
+    void initEffectiveAttributes(const Shape &inDims, const Shape &outDims);
     mkldnn::algorithm getPoolingAlgorithm() const;
-    std::shared_ptr<mkldnn::pooling_forward::desc> createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
+    std::shared_ptr<mkldnn::pooling_v2_forward::desc> createDescriptorInternal(const mkldnn::memory::desc& in_candidate,
                                                                             const mkldnn::memory::desc& out_candidate,
                                                                             const mkldnn::algorithm alg) const;
 
     AttrPtr pAttr;
 
+    bool isMaxPool8 = false;
     bool auto_pad = false;
     bool exclude_pad = false;
+    std::vector<ptrdiff_t> dilation;
     std::vector<ptrdiff_t> stride;
     std::vector<ptrdiff_t> kernel;
 
@@ -59,15 +61,16 @@ private:
     std::vector<ptrdiff_t> effective_pad_begin;
     std::vector<ptrdiff_t> effective_pad_end;
 
+    /// Effective dilation. Used to define correct dilation for OneDNN.
+    /// For OneDNN default dilation is vector of zero
+    std::vector<ptrdiff_t> effective_dilation;
+
     /// Effective pad value. Describe how much zero element added to input
     /// data tensor. May be less than "Effective padding" values.
     /// If pooling window is out of this padding, the region of averaging
     /// is decreased.
     std::vector<ptrdiff_t> data_pad_begin;
     std::vector<ptrdiff_t> data_pad_end;
-
-    InferenceEngine::Precision inputPrecision = InferenceEngine::Precision::FP32;
-    InferenceEngine::Precision outputPrecision = InferenceEngine::Precision::FP32;
 };
 
 } // namespace MKLDNNPlugin
@@ -351,8 +351,174 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_SameLowerPad_CeilRounding_5Dinput, Poolin
                         ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                         PoolingLayerTest::getTestCaseName);
 
+////* ========== Max Pooling V8 ========== */
+
+const std::vector<std::vector<size_t>> dilation = {{1, 1}, {2, 2}};
+const std::vector<std::vector<size_t >> dilation3D = {{1, 1, 1}, {2, 2, 2}};
+
+/* ========== Explicit Pad Floor Rounding ========== */
+const auto maxPoolv8_ExplicitPad_FloorRounding_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(dilation),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_ExplicitPad_FloorRounding, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_ExplicitPad_FloorRounding_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========== Same Upper Pad Floor Rounding ========== */
+const auto maxPoolv8_SameUpperPad_FloorRounding_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(dilation),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),
+        ::testing::Values(ngraph::op::PadType::SAME_UPPER)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameUpperPad_FloorRounding, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_SameUpperPad_FloorRounding_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========== Same Lower Pad Floor Rounding ========== */
+const auto maxPoolv8_SameLowerPad_FloorRounding_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(dilation),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),
+        ::testing::Values(ngraph::op::PadType::SAME_LOWER)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameLowerPad_FloorRounding, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_SameLowerPad_FloorRounding_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========= Explicit Pad Floor Rounding 5D input========== */
+const auto maxPoolv8_ExplicitPad_FloorRounding_5Dinput_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernel3D),
+        ::testing::ValuesIn(strides3D),
+        ::testing::Values(dilation3D[0]),
+        ::testing::ValuesIn(padBegins3D),
+        ::testing::ValuesIn(padEnds3D),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_ExplicitPad_FloorRounding_5Dinput, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_ExplicitPad_FloorRounding_5Dinput_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({32, 32, 2, 2, 2})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========= Same Upper Pad Floor Rounding 5D input========== */
+const auto maxPoolv8_SameUpperPad_FloorRounding_5Dinput_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernel3D),
+        ::testing::ValuesIn(strides3D),
+        ::testing::ValuesIn(dilation3D),
+        ::testing::ValuesIn(padBegins3D),
+        ::testing::ValuesIn(padEnds3D),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),
+        ::testing::Values(ngraph::op::PadType::SAME_UPPER)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameUpperPad_FloorRounding_5Dinput, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_SameUpperPad_FloorRounding_5Dinput_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({32, 32, 2, 2, 2})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========= Same Lower Pad Ceil Rounding 5D input========== */
+const auto maxPoolv8_SameLowerPad_CeilRounding_5Dinput_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernel3D),
+        ::testing::ValuesIn(strides3D),
+        ::testing::ValuesIn(dilation3D),
+        ::testing::ValuesIn(padBegins3D),
+        ::testing::ValuesIn(padEnds3D),
+        ::testing::Values(ngraph::op::RoundingType::CEIL),
+        ::testing::Values(ngraph::op::PadType::SAME_LOWER)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_SameLowerPad_CeilRounding_5Dinput, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_SameLowerPad_CeilRounding_5Dinput_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({32, 32, 2, 2, 2})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
+/* ========= Explicit Pad Ceil Rounding ========== */
+const auto maxPoolv8_ExplicitPad_CeilRounding_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(dilation),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::Values(ngraph::op::RoundingType::CEIL),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolv8_ExplicitPad_CeilRounding, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_ExplicitPad_CeilRounding_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
 ////* ========== Avg and Max Polling Cases ========== */
 /* ========== Valid Pad Rounding Not Applicable ========== */
 const auto allPools_ValidPad_Params = ::testing::Combine(
         ::testing::Values(ngraph::helpers::PoolingTypes::MAX, ngraph::helpers::PoolingTypes::AVG),
         ::testing::ValuesIn(kernels),
@@ -366,14 +532,37 @@ const auto allPools_ValidPad_Params = ::testing::Combine(
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_MAX_and_AVGPool_ValidPad, PoolingLayerTest,
                          ::testing::Combine(
                                  allPools_ValidPad_Params,
                                  ::testing::ValuesIn(netPrecisions),
                                  ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                  ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                  ::testing::Values(InferenceEngine::Layout::ANY),
                                  ::testing::Values(InferenceEngine::Layout::ANY),
                                  ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                  ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                          PoolingLayerTest::getTestCaseName);
 
+const auto maxPoolv8_ValidPad_Params = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(dilation),
+        ::testing::Values(std::vector<size_t>({0, 0})),
+        ::testing::Values(std::vector<size_t>({0, 0})),
+        ::testing::Values(ngraph::op::RoundingType::FLOOR),  // placeholder value - Rounding Type not applicable for Valid pad type
+        ::testing::Values(ngraph::op::PadType::VALID)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MAXPoolv8_ValidPad, MaxPoolingV8LayerTest,
+                         ::testing::Combine(
+                                 maxPoolv8_ValidPad_Params,
+                                 ::testing::ValuesIn(netPrecisions),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(InferenceEngine::Layout::ANY),
+                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         MaxPoolingV8LayerTest::getTestCaseName);
+
 } // namespace
@@ -20,6 +20,11 @@ using poolLayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecific
                                              CPUSpecificParams,
                                              fusingSpecificParams>;
 
+using maxPoolV8LayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::maxPoolV8SpecificParams,
+                                                  InputShape,
+                                                  ElementType,
+                                                  CPUSpecificParams>;
+
 class PoolingLayerCPUTest : public testing::WithParamInterface<poolLayerCpuTestParamsSet>,
                             virtual public SubgraphBaseTest, public CpuTestWithFusing {
 public:
@@ -68,8 +73,6 @@ public:
         results << CPUTestsBase::getTestCaseName(cpuParams);
         results << CpuTestWithFusing::getTestCaseName(fusingParams);
         return results.str();
-
-        return results.str();
     }
 
 protected:
@@ -98,7 +101,10 @@ protected:
         if (selectedType.empty()) {
             selectedType = getPrimitiveType();
         }
-        selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inPrc).name();
+        if (isInt8)
+            selectedType = selectedType + "_I8";
+        else
+            selectedType = makeSelectedTypeStr(selectedType, inPrc);
 
         init_input_shapes({inputShapes});
 
@@ -124,11 +130,87 @@ protected:
     }
 };
 
+class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface<maxPoolV8LayerCpuTestParamsSet>,
+                                 virtual public SubgraphBaseTest, public CPUTestsBase {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerCpuTestParamsSet>& obj) {
+        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
+        InputShape inputShapes;
+        ElementType inPrc;
+        CPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = obj.param;
+
+        std::vector<size_t> kernel, stride, dilation;
+        std::vector<size_t> padBegin, padEnd;
+        ngraph::op::PadType padType;
+        ngraph::op::RoundingType roundingType;
+        std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = basicParamsSet;
+
+        std::ostringstream results;
+        results << "IS=(";
+        results << CommonTestUtils::partialShape2str({inputShapes.first}) << ")_";
+        results << "TS=";
+        for (const auto& shape : inputShapes.second) {
+            results << CommonTestUtils::vec2str(shape) << "_";
+        }
+        results << "Prc=" << inPrc << "_";
+        results << "MaxPool_";
+        results << "K" << CommonTestUtils::vec2str(kernel) << "_";
+        results << "S" << CommonTestUtils::vec2str(stride) << "_";
+        results << "D" << CommonTestUtils::vec2str(dilation) << "_";
+        results << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+        results << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+        results << "Rounding=" << roundingType << "_";
+        results << "AutoPad=" << padType << "_";
+
+        results << CPUTestsBase::getTestCaseName(cpuParams);
+        return results.str();
+    }
+
+protected:
+    void SetUp() override {
+        targetDevice = CommonTestUtils::DEVICE_CPU;
+
+        LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet;
+        InputShape inputShapes;
+        ElementType inPrc;
+        CPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, inputShapes, inPrc, cpuParams) = this->GetParam();
+
+        std::vector<size_t> kernel, stride, dilation;
+        std::vector<size_t> padBegin, padEnd;
+        ngraph::op::PadType padType;
+        ngraph::op::RoundingType roundingType;
+        std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = basicParamsSet;
+        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
+        if (selectedType.empty()) {
+            selectedType = getPrimitiveType();
+        }
+        selectedType = makeSelectedTypeStr(selectedType, inPrc);
+
+        init_input_shapes({inputShapes});
+
+        auto params = ngraph::builder::makeDynamicParams(inPrc, inputDynamicShapes);
+        std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makeMaxPoolingV8(params[0], stride, dilation, padBegin, padEnd,
+                                                                                  kernel, roundingType, padType);
+        pooling->get_rt_info() = getCPUInfo();
+        ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling->output(0))};
+        function = std::make_shared<ngraph::Function>(results, params, "MaxPooling");
+    }
+};
+
 TEST_P(PoolingLayerCPUTest, CompareWithRefs) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     run();
-    // CheckPluginRelatedResults(executableNetwork, "Pooling");
+    CheckPluginRelatedResults(executableNetwork, "Pooling");
+}
+
+TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    run();
+    CheckPluginRelatedResults(executableNetwork, "Pooling");
 }
 
 namespace {
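Note: these CPU tests run through the dynamic-shape harness, where an InputShape bundles one (possibly dynamic) partial shape with the list of concrete shapes the test iterates over -- that is what getTestCaseName prints as IS=(...) and TS=... above. A hypothetical literal, assuming the usual pair-like brace initialization of that type:

// {dynamic partial shape, target static shapes resolved against it}
InputShape example{{-1, 3, -1, -1},
                   {{1, 3, 30, 30}, {2, 3, 16, 16}}};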
@@ -291,6 +373,20 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = {
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
 };
 
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
+};
+
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D_ref = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+};
+
 const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = {
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
@@ -321,6 +417,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest,
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(paramsMaxV84D),
+                                 ::testing::ValuesIn(inputShapes4D),
+                                 ::testing::ValuesIn(inpOutPrecision),
+                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(paramsMaxV84D_ref),
+                                 ::testing::ValuesIn(inputShapes4D),
+                                 ::testing::ValuesIn(inpOutPrecision),
+                                 ::testing::Values(ref)),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_4D, PoolingLayerCPUTest,
                          ::testing::Combine(
                                  ::testing::ValuesIn(paramsAvg4D),
@@ -349,10 +461,24 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = {
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 3, 4}, {2, 2, 2}, {1, 1, 1}, {1, 2, 3},
+        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
 };
 
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER },
+};
+
+const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D_ref = {
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
+                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+};
+
 const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
@@ -366,7 +492,7 @@ const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
         LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {4, 4, 4}, {2, 2, 2}, {2, 2, 2},
+        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
                                            ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
 };
 
@@ -385,6 +511,22 @@ INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_5D, PoolingLayerCPUTest,
                             ::testing::Values(emptyFusingSpec)),
                         PoolingLayerCPUTest::getTestCaseName);
 
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(paramsMaxV85D),
+                                 ::testing::ValuesIn(inputShapes5D),
+                                 ::testing::ValuesIn(inpOutPrecision),
+                                 ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigs))),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_5D_ref, MaxPoolingV8LayerCPUTest,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(paramsMaxV85D_ref),
+                                 ::testing::ValuesIn(inputShapes5D),
+                                 ::testing::ValuesIn(inpOutPrecision),
+                                 ::testing::Values(ref)),
+                         MaxPoolingV8LayerCPUTest::getTestCaseName);
+
 INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D, PoolingLayerCPUTest,
                          ::testing::Combine(
                                  ::testing::ValuesIn(paramsAvg5D),
@@ -19,4 +19,9 @@ TEST_P(GlobalPoolingLayerTest, CompareWithRefs) {
         PluginCache::get().reset();
     }
 }
 
+TEST_P(MaxPoolingV8LayerTest, CompareWithRefs) {
+    Run();
+}
+
 } // namespace LayerTestsDefinitions
@@ -48,6 +48,27 @@ typedef std::tuple<
         std::string                  // Device name
 > globalPoolLayerTestParamsSet;
 
+typedef std::tuple<
+        std::vector<size_t>,         // Kernel size
+        std::vector<size_t>,         // Stride
+        std::vector<size_t>,         // Dilation
+        std::vector<size_t>,         // Pad begin
+        std::vector<size_t>,         // Pad end
+        ngraph::op::RoundingType,    // Rounding type
+        ngraph::op::PadType          // Pad type
+> maxPoolV8SpecificParams;
+
+typedef std::tuple<
+        maxPoolV8SpecificParams,
+        InferenceEngine::Precision,  // Net precision
+        InferenceEngine::Precision,  // Input precision
+        InferenceEngine::Precision,  // Output precision
+        InferenceEngine::Layout,     // Input layout
+        InferenceEngine::Layout,     // Output layout
+        std::vector<size_t>,         // Input shape
+        std::string                  // Device name
+> maxPoolV8LayerTestParamsSet;
+
 class PoolingLayerTest : public testing::WithParamInterface<poolLayerTestParamsSet>,
                          virtual public LayerTestsUtils::LayerTestsCommon {
 public:
|
|||||||
void SetUp() override;
|
void SetUp() override;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
class MaxPoolingV8LayerTest : public testing::WithParamInterface<maxPoolV8LayerTestParamsSet>,
|
||||||
|
virtual public LayerTestsUtils::LayerTestsCommon {
|
||||||
|
public:
|
||||||
|
static std::string getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerTestParamsSet>& obj);
|
||||||
|
|
||||||
|
protected:
|
||||||
|
void SetUp() override;
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace LayerTestsDefinitions
|
} // namespace LayerTestsDefinitions
|
||||||
|
@@ -94,6 +94,38 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo
     return result.str();
 }
 
+std::string MaxPoolingV8LayerTest::getTestCaseName(const testing::TestParamInfo<maxPoolV8LayerTestParamsSet>& obj) {
+    maxPoolV8SpecificParams poolParams;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::Precision inPrc, outPrc;
+    InferenceEngine::Layout inLayout, outLayout;
+    std::vector<size_t> inputShapes;
+    std::string targetDevice;
+    std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param;
+    std::vector<size_t> kernel, stride, dilation;
+    std::vector<size_t> padBegin, padEnd;
+    ngraph::op::PadType padType;
+    ngraph::op::RoundingType roundingType;
+    std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = poolParams;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "K" << CommonTestUtils::vec2str(kernel) << "_";
+    result << "S" << CommonTestUtils::vec2str(stride) << "_";
+    result << "D" << CommonTestUtils::vec2str(dilation) << "_";
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+    result << "Rounding=" << roundingType << "_";
+    result << "AutoPad=" << padType << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "inPRC=" << inPrc.name() << "_";
+    result << "outPRC=" << outPrc.name() << "_";
+    result << "inL=" << inLayout << "_";
+    result << "outL=" << outLayout << "_";
+    result << "trgDev=" << targetDevice;
+    return result.str();
+}
+
 void PoolingLayerTest::SetUp() {
     poolSpecificParams poolParams;
     std::vector<size_t> inputShape;
@@ -159,4 +191,28 @@ void GlobalPoolingLayerTest::SetUp() {
     ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pooling)};
     function = std::make_shared<ngraph::Function>(results, params, "pooling");
 }
 
+void MaxPoolingV8LayerTest::SetUp() {
+    maxPoolV8SpecificParams poolParams;
+    std::vector<size_t> inputShape;
+    InferenceEngine::Precision netPrecision;
+    std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
+    std::vector<size_t> kernel, stride, dilation;
+    std::vector<size_t> padBegin, padEnd;
+    ngraph::op::PadType padType;
+    ngraph::op::RoundingType roundingType;
+    std::tie(kernel, stride, dilation, padBegin, padEnd, roundingType, padType) = poolParams;
+
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+    std::shared_ptr<ngraph::Node> maxPool = ngraph::builder::makeMaxPoolingV8(paramOuts[0], stride, dilation, padBegin, padEnd,
+                                                                              kernel, roundingType, padType);
+
+    ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(maxPool->output(0))};
+    function = std::make_shared<ngraph::Function>(results, params, "MaxPoolV8");
+}
+
 } // namespace LayerTestsDefinitions
@@ -428,6 +428,15 @@ std::shared_ptr<Node> makePooling(const ngraph::Output<Node> &in,
                                   bool excludePad,
                                   const ngraph::helpers::PoolingTypes &poolType);
 
+std::shared_ptr<Node> makeMaxPoolingV8(const ngraph::Output<Node> &in,
+                                       const std::vector<size_t> &strides,
+                                       const std::vector<size_t> &dilation,
+                                       const std::vector<size_t> &padsBegin,
+                                       const std::vector<size_t> &padsEnd,
+                                       const std::vector<size_t> &kernel,
+                                       const op::RoundingType &roundingType,
+                                       const op::PadType &padType);
+
 std::shared_ptr<Node> makeROIPooling(const Output<Node>& input,
                                      const Output<Node>& coords,
                                      const Shape& output_size,
@@ -35,5 +35,18 @@ std::shared_ptr<Node> makePooling(const ngraph::Output<Node> &in,
     return pooling;
 }
 
+std::shared_ptr<Node> makeMaxPoolingV8(const ngraph::Output<Node> &in,
+                                       const std::vector<size_t> &strides,
+                                       const std::vector<size_t> &dilation,
+                                       const std::vector<size_t> &padsBegin,
+                                       const std::vector<size_t> &padsEnd,
+                                       const std::vector<size_t> &kernel,
+                                       const op::RoundingType &roundingType,
+                                       const op::PadType &padType) {
+    std::shared_ptr<ngraph::Node> pooling = std::make_shared<ngraph::opset8::MaxPool>(in, strides, dilation, padsBegin, padsEnd,
+                                                                                      kernel, roundingType, padType);
+    return pooling;
+}
+
 } // namespace builder
 } // namespace ngraph
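Usage sketch, not part of the commit: building a standalone opset8 MaxPool with dilation 2 through the new helper, assuming the signature declared above (the variable names are ours):

auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{1, 3, 30, 30});
auto pool = ngraph::builder::makeMaxPoolingV8(input,
                                              /*strides=*/{1, 1},
                                              /*dilation=*/{2, 2},
                                              /*padsBegin=*/{0, 0},
                                              /*padsEnd=*/{0, 0},
                                              /*kernel=*/{3, 3},
                                              ngraph::op::RoundingType::FLOOR,
                                              ngraph::op::PadType::EXPLICIT);
// pool->output(0) holds the pooled values; output(1), the indices, stays
// unconnected, which is exactly the configuration the CPU plugin accepts.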