[CPU] Added dynamism support for Pad (#8396)
This commit is contained in:
parent
29a3f56003
commit
c82744e435
@ -18,36 +18,37 @@ using namespace mkldnn;
|
|||||||
using namespace MKLDNNPlugin;
|
using namespace MKLDNNPlugin;
|
||||||
using namespace InferenceEngine;
|
using namespace InferenceEngine;
|
||||||
|
|
||||||
|
#define THROW_ERROR IE_THROW() << "Pad layer with name '" << getName() << "' "
|
||||||
|
|
||||||
bool MKLDNNPadNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
|
bool MKLDNNPadNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
|
||||||
try {
|
try {
|
||||||
if (isDynamicNgraphNode(op)) {
|
auto pad = ov::as_type_ptr<const ngraph::opset1::Pad>(op);
|
||||||
errorMessage = "Doesn't support op with dynamic shapes";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);
|
|
||||||
if (!pad) {
|
if (!pad) {
|
||||||
errorMessage = "Only opset1 Pad operation is supported";
|
errorMessage = "Only opset1 Pad operation is supported";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_BEGIN_ID)) == nullptr ||
|
|
||||||
std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_END_ID)) == nullptr ||
|
|
||||||
(pad->get_pad_mode() == ngraph::op::PadMode::CONSTANT && pad->get_input_size() == 4 &&
|
|
||||||
std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID)) == nullptr)) {
|
|
||||||
errorMessage = "Only Constant operation on 'pads_begin', 'pads_end', 'pad_value' inpus is supported";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const auto pad_mode = pad->get_pad_mode();
|
const auto pad_mode = pad->get_pad_mode();
|
||||||
if (pad_mode != ngraph::op::PadMode::CONSTANT && pad_mode != ngraph::op::PadMode::EDGE && pad_mode != ngraph::op::PadMode::REFLECT &&
|
if (!MKLDNNPlugin::one_of(pad_mode, ngraph::op::PadMode::CONSTANT, ngraph::op::PadMode::EDGE, ngraph::op::PadMode::REFLECT,
|
||||||
pad_mode != ngraph::op::PadMode::SYMMETRIC) {
|
ngraph::op::PadMode::SYMMETRIC)) {
|
||||||
errorMessage = "Has unsupported pad_mode: " + ngraph::as_string(pad_mode);
|
errorMessage = "Has unsupported pad_mode: " + ngraph::as_string(pad_mode);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (op->get_input_node_shared_ptr(PADS_BEGIN_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
|
||||||
|
op->get_input_node_shared_ptr(PADS_END_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
|
||||||
|
(pad->get_input_size() == 4 && pad->get_pad_mode() == ngraph::op::PadMode::CONSTANT &&
|
||||||
|
op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static())) {
|
||||||
|
// TODO: Support pads_begin, pads_end, pad_value inputs for dynamic shapes.
|
||||||
|
errorMessage = "Only Constant 'pads_begin', 'pads_end' and 'pad_value' inputs are supported.";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
const auto pb = pad->get_pads_begin();
|
const auto pb = pad->get_pads_begin();
|
||||||
const auto pe = pad->get_pads_end();
|
const auto pe = pad->get_pads_end();
|
||||||
if (std::count_if(pb.begin(), pb.end(), [](ptrdiff_t x) { return x < 0; }) != 0 ||
|
if (std::any_of(pb.begin(), pb.end(), [](ptrdiff_t x) { return x < 0; }) ||
|
||||||
std::count_if(pe.begin(), pe.end(), [](ptrdiff_t x) { return x < 0; }) != 0) {
|
std::any_of(pe.begin(), pe.end(), [](ptrdiff_t x) { return x < 0; })) {
|
||||||
errorMessage = "Doesn't support 'pads_begin' or 'pads_end' negative value";
|
errorMessage = "Doesn't support 'pads_begin' or 'pads_end' with negative values";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
@ -59,62 +60,61 @@ bool MKLDNNPadNode::isSupportedOperation(const std::shared_ptr<const ngraph::Nod
|
|||||||
MKLDNNPadNode::MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
|
MKLDNNPadNode::MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
|
||||||
: MKLDNNNode(op, eng, cache) {
|
: MKLDNNNode(op, eng, cache) {
|
||||||
std::string errorMessage;
|
std::string errorMessage;
|
||||||
if (isSupportedOperation(op, errorMessage)) {
|
if (!isSupportedOperation(op, errorMessage)) {
|
||||||
errorPrefix = "Pad node with name '" + op->get_friendly_name() + "'";
|
|
||||||
const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);
|
|
||||||
|
|
||||||
const auto pb = pad->get_pads_begin();
|
|
||||||
const auto pe = pad->get_pads_end();
|
|
||||||
for (size_t i = 0; i < pb.size(); i++)
|
|
||||||
padsBegin.push_back(static_cast<unsigned int>(pb[i]));
|
|
||||||
for (size_t i = 0; i < pe.size(); i++)
|
|
||||||
padsEnd.push_back(static_cast<unsigned int>(pe[i]));
|
|
||||||
|
|
||||||
const auto pad_mode = pad->get_pad_mode();
|
|
||||||
isPadValueSpecified = pad->get_input_size() == 4;
|
|
||||||
if (pad_mode == ngraph::op::PadMode::CONSTANT) {
|
|
||||||
padMode = CONSTANT;
|
|
||||||
if (isPadValueSpecified) {
|
|
||||||
if (!ngraph::is_scalar(pad->get_input_shape(PAD_VALUE_ID)))
|
|
||||||
IE_THROW() << errorPrefix << " has non scalar 'pad_value' input";
|
|
||||||
padValue = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID))->cast_vector<float>()[0];
|
|
||||||
}
|
|
||||||
} else if (pad_mode == ngraph::op::PadMode::EDGE) {
|
|
||||||
padMode = EDGE;
|
|
||||||
} else if (pad_mode == ngraph::op::PadMode::REFLECT) {
|
|
||||||
padMode = REFLECT;
|
|
||||||
} else if (pad_mode == ngraph::op::PadMode::SYMMETRIC) {
|
|
||||||
padMode = SYMMETRIC;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
IE_THROW(NotImplemented) << errorMessage;
|
IE_THROW(NotImplemented) << errorMessage;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
void MKLDNNPadNode::getSupportedDescriptors() {
|
if (inputShapes.size() != 3 && inputShapes.size() != 4)
|
||||||
if (getParentEdges().size() != 3 && getParentEdges().size() != 4)
|
THROW_ERROR << " has incorrect number of input edges";
|
||||||
IE_THROW() << errorPrefix << " has incorrect number of input edges";
|
if (outputShapes.size() != 1)
|
||||||
if (getChildEdges().empty())
|
THROW_ERROR << "Incorrect number of output edges";
|
||||||
IE_THROW() << errorPrefix << "Incorrect number of output edges";
|
|
||||||
|
|
||||||
const auto srcDims = getInputShapeAtPort(DATA_ID).getStaticDims();
|
const size_t srcDimsRank = inputShapes[DATA_ID].getRank();
|
||||||
const auto dstDims = getOutputShapeAtPort(DATA_ID).getStaticDims();
|
const size_t dstDimsRank = outputShapes[DATA_ID].getRank();
|
||||||
if (srcDims.size() != dstDims.size() || padsBegin.size() != srcDims.size() || padsEnd.size() != srcDims.size())
|
if (srcDimsRank != dstDimsRank)
|
||||||
IE_THROW() << errorPrefix << " has incorrect number of input/output dimensions!";
|
THROW_ERROR << "has incorrect number of input/output dimensions!";
|
||||||
|
|
||||||
if (padMode == REFLECT) {
|
auto pad = ov::as_type_ptr<const ngraph::opset1::Pad>(op);
|
||||||
for (size_t i = 0; i < srcDims.size(); i++) {
|
if (!pad) {
|
||||||
if ((srcDims[i] - 1) < padsBegin[i] || (srcDims[i] - 1) < padsEnd[i])
|
THROW_ERROR << "couldn't be casted to op of opset1";
|
||||||
IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'reflect' pad mode";
|
}
|
||||||
}
|
|
||||||
} else if (padMode == SYMMETRIC) {
|
if (op->get_input_node_shared_ptr(PADS_BEGIN_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static() &&
|
||||||
for (size_t i = 0; i < srcDims.size(); i++) {
|
op->get_input_node_shared_ptr(PADS_END_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
|
||||||
if (srcDims[i] < padsBegin[i] || srcDims[i] < padsEnd[i])
|
const auto pb = pad->get_pads_begin();
|
||||||
IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'symmetric' pad mode";
|
const auto pe = pad->get_pads_end();
|
||||||
|
|
||||||
|
for (size_t i = 0; i < pb.size(); i++)
|
||||||
|
attrs.padsBegin.push_back(static_cast<unsigned int>(pb[i]));
|
||||||
|
for (size_t i = 0; i < pe.size(); i++)
|
||||||
|
attrs.padsEnd.push_back(static_cast<unsigned int>(pe[i]));
|
||||||
|
|
||||||
|
if (attrs.padsBegin.size() != srcDimsRank || attrs.padsEnd.size() != srcDimsRank)
|
||||||
|
THROW_ERROR << "has incorrect number of input/output dimensions!";
|
||||||
|
}
|
||||||
|
|
||||||
|
const auto pad_mode = pad->get_pad_mode();
|
||||||
|
isPadValueSpecified = pad->get_input_size() == 4;
|
||||||
|
if (pad_mode == ngraph::op::PadMode::CONSTANT) {
|
||||||
|
attrs.padMode = CONSTANT;
|
||||||
|
if (isPadValueSpecified && op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
|
||||||
|
if (!ngraph::is_scalar(pad->get_input_shape(PAD_VALUE_ID)))
|
||||||
|
THROW_ERROR << "has non scalar 'pad_value' input";
|
||||||
|
attrs.padValue = ov::as_type_ptr<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID))->cast_vector<float>()[0];
|
||||||
}
|
}
|
||||||
|
} else if (pad_mode == ngraph::op::PadMode::EDGE) {
|
||||||
|
attrs.padMode = EDGE;
|
||||||
|
} else if (pad_mode == ngraph::op::PadMode::REFLECT) {
|
||||||
|
attrs.padMode = REFLECT;
|
||||||
|
} else if (pad_mode == ngraph::op::PadMode::SYMMETRIC) {
|
||||||
|
attrs.padMode = SYMMETRIC;
|
||||||
|
} else {
|
||||||
|
THROW_ERROR << "has unsupported pad_mode: " + ngraph::as_string(pad_mode);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void MKLDNNPadNode::getSupportedDescriptors() {}
|
||||||
|
|
||||||
void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
|
void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
|
||||||
if (!supportedPrimitiveDescriptors.empty())
|
if (!supportedPrimitiveDescriptors.empty())
|
||||||
return;
|
return;
|
||||||
@ -126,8 +126,8 @@ void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
|
|||||||
if (std::find(supportedPrecisions.begin(), supportedPrecisions.end(), precision) == supportedPrecisions.end())
|
if (std::find(supportedPrecisions.begin(), supportedPrecisions.end(), precision) == supportedPrecisions.end())
|
||||||
precision = precision.is_float() ? InferenceEngine::Precision::FP32 : InferenceEngine::Precision::I32;
|
precision = precision.is_float() ? InferenceEngine::Precision::FP32 : InferenceEngine::Precision::I32;
|
||||||
|
|
||||||
auto srcDims = getInputShapeAtPort(DATA_ID).getStaticDims();
|
const auto& inputDataShape = getInputShapeAtPort(DATA_ID);
|
||||||
int numOfDims = srcDims.size();
|
const size_t numOfDims = inputDataShape.getRank();
|
||||||
|
|
||||||
NodeConfig config;
|
NodeConfig config;
|
||||||
config.dynBatchSupport = false;
|
config.dynBatchSupport = false;
|
||||||
@ -151,15 +151,17 @@ void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
|
|||||||
|
|
||||||
pushSupportedPrimitiveDescriptor(LayoutType::ncsp);
|
pushSupportedPrimitiveDescriptor(LayoutType::ncsp);
|
||||||
|
|
||||||
auto canUseBlocked = [=](const size_t blockSize) {
|
auto canUseBlocked = [&](const size_t blockSize) {
|
||||||
return (padMode == CONSTANT && padsBegin[1] % blockSize == 0 && padsEnd[1] % blockSize == 0) ||
|
const auto& srcDims = inputDataShape.getDims();
|
||||||
(padMode != CONSTANT && padsBegin[1] == 0 && padsEnd[1] == 0);
|
return srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % blockSize == 0 &&
|
||||||
|
((attrs.padMode == CONSTANT && attrs.padsBegin[1] % blockSize == 0 && attrs.padsEnd[1] % blockSize == 0) ||
|
||||||
|
(attrs.padMode != CONSTANT && attrs.padsBegin[1] == 0 && attrs.padsEnd[1] == 0));
|
||||||
};
|
};
|
||||||
|
|
||||||
if (numOfDims == 4 || numOfDims == 5) {
|
if (numOfDims == 4 || numOfDims == 5) {
|
||||||
if (srcDims[1] % 8 == 0 && canUseBlocked(8))
|
if (canUseBlocked(8))
|
||||||
pushSupportedPrimitiveDescriptor(LayoutType::nCsp8c);
|
pushSupportedPrimitiveDescriptor(LayoutType::nCsp8c);
|
||||||
if (srcDims[1] % 16 == 0 && canUseBlocked(16))
|
if (canUseBlocked(16))
|
||||||
pushSupportedPrimitiveDescriptor(LayoutType::nCsp16c);
|
pushSupportedPrimitiveDescriptor(LayoutType::nCsp16c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -168,17 +170,76 @@ void MKLDNNPadNode::createPrimitive() {
|
|||||||
auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
|
auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
|
||||||
auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
|
auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
|
||||||
if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
|
if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
|
||||||
IE_THROW() << "Destination memory for Pad " << getName() << " didn't allocate.";
|
THROW_ERROR << "has not allocated source memory.";
|
||||||
if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr())
|
if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr())
|
||||||
IE_THROW() << "Input memory for Pad " << getName() << " didn't allocate.";
|
THROW_ERROR << "has not allocated destination memory.";
|
||||||
if (getSelectedPrimitiveDescriptor() == nullptr)
|
if (getSelectedPrimitiveDescriptor() == nullptr)
|
||||||
IE_THROW() << "Preferable primitive descriptor for Pad " << getName() << " is not set.";
|
THROW_ERROR << "has unidentified preferable primitive descriptor";
|
||||||
|
|
||||||
params.sizeData = this->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc->getPrecision().size();
|
// pads are constant, so we can calculate new collapsing pads for first target dimensions and use it for the next dimensions
|
||||||
|
// to avoid permanent identical pad calculations
|
||||||
|
const size_t blockSize = srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp16c) ? 16 :
|
||||||
|
(srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp8c) ? 8 : 1);
|
||||||
|
if (blockSize > 1) {
|
||||||
|
attrs.padsBegin[1] /= blockSize;
|
||||||
|
attrs.padsEnd[1] /= blockSize;
|
||||||
|
attrs.padsBegin.push_back(0);
|
||||||
|
attrs.padsEnd.push_back(0);
|
||||||
|
} else {
|
||||||
|
auto order = getParentEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>()->getOrder();
|
||||||
|
std::vector<unsigned int> newPadsBegin(attrs.padsBegin.size(), 0), newPadsEnd(attrs.padsEnd.size(), 0);
|
||||||
|
for (size_t i = 0; i < attrs.padsBegin.size(); ++i) {
|
||||||
|
newPadsBegin[i] = attrs.padsBegin[order[i]];
|
||||||
|
newPadsEnd[i] = attrs.padsEnd[order[i]];
|
||||||
|
}
|
||||||
|
attrs.padsBegin = newPadsBegin;
|
||||||
|
attrs.padsEnd = newPadsEnd;
|
||||||
|
}
|
||||||
|
|
||||||
const auto inBlkDesc = getParentEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
|
// collapse dimensions
|
||||||
params.srcDims = inBlkDesc->getBlockDims();
|
attrs.beginPadIdx = 0;
|
||||||
params.dstDims = getChildEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>()->getBlockDims();
|
attrs.endPadIdx = attrs.padsBegin.size() - 1;
|
||||||
|
|
||||||
|
for (int i = 0; i < attrs.padsBegin.size(); ++i) {
|
||||||
|
if (attrs.padsBegin[i] != 0 || attrs.padsEnd[i] != 0) {
|
||||||
|
attrs.beginPadIdx = i - 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = attrs.padsBegin.size() - 1; i >= 0; --i) {
|
||||||
|
if (attrs.padsBegin[i] != 0 || attrs.padsEnd[i] != 0) {
|
||||||
|
attrs.endPadIdx = i;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (attrs.beginPadIdx > 0) {
|
||||||
|
attrs.padsBegin.erase(attrs.padsBegin.begin() + 1, attrs.padsBegin.begin() + attrs.beginPadIdx + 1);
|
||||||
|
attrs.padsEnd.erase(attrs.padsEnd.begin() + 1, attrs.padsEnd.begin() + attrs.beginPadIdx + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
attrs.prc = srcMemPtr->getDesc().getPrecision();
|
||||||
|
|
||||||
|
if (inputShapesDefined()) {
|
||||||
|
prepareParams();
|
||||||
|
updateLastInputDims();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void MKLDNNPadNode::prepareParams() {
|
||||||
|
execPtr = std::make_shared<PadExecutor>(attrs,
|
||||||
|
getParentEdgeAt(0)->getMemoryPtr()->GetDescWithType<BlockedMemoryDesc>()->getBlockDims(),
|
||||||
|
getChildEdgeAt(0)->getMemoryPtr()->GetDescWithType<BlockedMemoryDesc>()->getBlockDims());
|
||||||
|
}
|
||||||
|
|
||||||
|
MKLDNNPadNode::PadExecutor::PadExecutor(const PadAttrs& attrs,
|
||||||
|
const VectorDims& srcDims,
|
||||||
|
const VectorDims& dstDims) {
|
||||||
|
params.attrs = attrs;
|
||||||
|
params.srcDims = srcDims;
|
||||||
|
params.dstDims = dstDims;
|
||||||
|
params.dataSize = attrs.prc.size();
|
||||||
|
|
||||||
size_t nDims = params.srcDims.size();
|
size_t nDims = params.srcDims.size();
|
||||||
params.srcStrides.resize(nDims, 1);
|
params.srcStrides.resize(nDims, 1);
|
||||||
@ -188,45 +249,11 @@ void MKLDNNPadNode::createPrimitive() {
|
|||||||
params.dstStrides[i] = params.dstStrides[i + 1] * params.dstDims[i + 1];
|
params.dstStrides[i] = params.dstStrides[i + 1] * params.dstDims[i + 1];
|
||||||
}
|
}
|
||||||
|
|
||||||
if (getParentEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nCsp16c) ||
|
params.lastDstDim = params.dstStrides[std::max(params.attrs.endPadIdx - 1, 0)];
|
||||||
getParentEdgeAt(0)->getMemory().getDesc().hasLayoutType(LayoutType::nCsp8c)) {
|
params.nDimsForWork = params.attrs.endPadIdx - std::max(params.attrs.beginPadIdx, 0);
|
||||||
padsBegin[1] /= params.srcDims[params.srcDims.size() - 1];
|
|
||||||
padsEnd[1] /= params.srcDims[params.srcDims.size() - 1];
|
|
||||||
padsBegin.push_back(0);
|
|
||||||
padsEnd.push_back(0);
|
|
||||||
} else {
|
|
||||||
auto order = inBlkDesc->getOrder();
|
|
||||||
std::vector<unsigned int> newPadsBegin(padsBegin.size(), 0), newPadsEnd(padsEnd.size(), 0);
|
|
||||||
for (size_t i = 0; i < padsBegin.size(); ++i) {
|
|
||||||
newPadsBegin[i] = padsBegin[order[i]];
|
|
||||||
newPadsEnd[i] = padsEnd[order[i]];
|
|
||||||
}
|
|
||||||
padsBegin = newPadsBegin;
|
|
||||||
padsEnd = newPadsEnd;
|
|
||||||
}
|
|
||||||
|
|
||||||
int beginIdx = 0;
|
|
||||||
int endIdx = padsBegin.size() - 1;
|
|
||||||
|
|
||||||
for (int i = 0; i < padsBegin.size(); ++i) {
|
|
||||||
if (padsBegin[i] != 0 || padsEnd[i] != 0) {
|
|
||||||
beginIdx = i - 1;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = padsBegin.size() - 1; i >= 0; --i) {
|
|
||||||
if (padsBegin[i] != 0 || padsEnd[i] != 0) {
|
|
||||||
endIdx = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
params.lastDstDim = params.dstStrides[std::max(endIdx - 1, 0)];
|
|
||||||
params.nDimsForWork = endIdx - std::max(beginIdx, 0);
|
|
||||||
params.nThreads = params.nDimsForWork > 0 ? 0 : 1;
|
params.nThreads = params.nDimsForWork > 0 ? 0 : 1;
|
||||||
params.workAmount = params.nDimsForWork > 0 ? params.dstDims[0] : 1lu;
|
params.workAmount = params.nDimsForWork > 0 ? params.dstDims[0] : 1lu;
|
||||||
for (int i = 1; i <= beginIdx; ++i) {
|
for (int i = 1; i <= params.attrs.beginPadIdx; ++i) {
|
||||||
params.workAmount *= params.dstDims[i];
|
params.workAmount *= params.dstDims[i];
|
||||||
params.dstDims[0] *= params.dstDims[i];
|
params.dstDims[0] *= params.dstDims[i];
|
||||||
params.srcDims[0] *= params.srcDims[i];
|
params.srcDims[0] *= params.srcDims[i];
|
||||||
@ -234,51 +261,62 @@ void MKLDNNPadNode::createPrimitive() {
|
|||||||
params.srcStrides[0] /= params.srcDims[i];
|
params.srcStrides[0] /= params.srcDims[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
if (beginIdx > 0) {
|
if (params.attrs.beginPadIdx > 0) {
|
||||||
beginIdx++;
|
params.attrs.beginPadIdx++;
|
||||||
params.dstDims.erase(params.dstDims.begin() + 1, params.dstDims.begin() + beginIdx);
|
params.dstDims.erase(params.dstDims.begin() + 1, params.dstDims.begin() + params.attrs.beginPadIdx);
|
||||||
params.srcDims.erase(params.srcDims.begin() + 1, params.srcDims.begin() + beginIdx);
|
params.srcDims.erase(params.srcDims.begin() + 1, params.srcDims.begin() + params.attrs.beginPadIdx);
|
||||||
params.dstStrides.erase(params.dstStrides.begin() + 1, params.dstStrides.begin() + beginIdx);
|
params.dstStrides.erase(params.dstStrides.begin() + 1, params.dstStrides.begin() + params.attrs.beginPadIdx);
|
||||||
params.srcStrides.erase(params.srcStrides.begin() + 1, params.srcStrides.begin() + beginIdx);
|
params.srcStrides.erase(params.srcStrides.begin() + 1, params.srcStrides.begin() + params.attrs.beginPadIdx);
|
||||||
padsBegin.erase(padsBegin.begin() + 1, padsBegin.begin() + beginIdx);
|
|
||||||
padsEnd.erase(padsEnd.begin() + 1, padsEnd.begin() + beginIdx);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
params.workAmount = params.workAmount * params.dstStrides[0] / params.lastDstDim;
|
params.workAmount = params.workAmount * params.dstStrides[0] / params.lastDstDim;
|
||||||
params.shift = params.dstStrides[params.nDimsForWork];
|
params.shift = params.dstStrides[params.nDimsForWork];
|
||||||
if (padMode != CONSTANT || (padMode == CONSTANT && padValue == 0)) {
|
if (params.attrs.padMode != CONSTANT || (params.attrs.padMode == CONSTANT && params.attrs.padValue == 0)) {
|
||||||
params.lastDstDim *= params.sizeData;
|
params.lastDstDim *= params.dataSize;
|
||||||
params.shift *= params.sizeData;
|
params.shift *= params.dataSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
params.srcODims.clear();
|
||||||
for (size_t i = 0; i < params.srcDims.size(); ++i)
|
for (size_t i = 0; i < params.srcDims.size(); ++i)
|
||||||
params.srcODims.push_back(padsBegin[i] + params.srcDims[i]);
|
params.srcODims.push_back(params.attrs.padsBegin[i] + params.srcDims[i]);
|
||||||
|
|
||||||
if (padMode == REFLECT || padMode == SYMMETRIC) {
|
params.srcDimsForReflectOrSymmetric.clear();
|
||||||
int shift = padMode == SYMMETRIC ? 1 : 0;
|
if (params.attrs.padMode == REFLECT || params.attrs.padMode == SYMMETRIC) {
|
||||||
|
int shift = params.attrs.padMode == SYMMETRIC ? 1 : 0;
|
||||||
for (size_t i = 0; i < params.srcDims.size(); ++i)
|
for (size_t i = 0; i < params.srcDims.size(); ++i)
|
||||||
params.srcDimsForReflectOrSymmetric.push_back(params.srcDims[i] + params.srcODims[i] - 2 + shift);
|
params.srcDimsForReflectOrSymmetric.push_back(params.srcDims[i] + params.srcODims[i] - 2 + shift);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MKLDNNPadNode::execute(mkldnn::stream strm) {
|
void MKLDNNPadNode::PadExecutor::exec(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
|
||||||
switch (padMode) {
|
switch (params.attrs.padMode) {
|
||||||
case CONSTANT:
|
case CONSTANT:
|
||||||
padConstant();
|
padConstant(srcMemPtr, dstMemPtr);
|
||||||
break;
|
break;
|
||||||
case EDGE:
|
case EDGE:
|
||||||
padEdge();
|
padEdge(srcMemPtr, dstMemPtr);
|
||||||
break;
|
break;
|
||||||
case REFLECT:
|
case REFLECT:
|
||||||
padReflectOrSymmetric();
|
padReflectOrSymmetric(srcMemPtr, dstMemPtr);
|
||||||
break;
|
break;
|
||||||
case SYMMETRIC:
|
case SYMMETRIC:
|
||||||
padReflectOrSymmetric(true);
|
padReflectOrSymmetric(srcMemPtr, dstMemPtr, true);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline size_t parallel_init(size_t start, size_t nDims, const SizeVector& dims, SizeVector& indexes) {
|
void MKLDNNPadNode::execute(mkldnn::stream strm) {
|
||||||
|
if (!execPtr)
|
||||||
|
THROW_ERROR << "has not compiled executor.";
|
||||||
|
|
||||||
|
execPtr->exec(getParentEdgeAt(0)->getMemoryPtr(), getChildEdgeAt(0)->getMemoryPtr());
|
||||||
|
}
|
||||||
|
|
||||||
|
void MKLDNNPadNode::executeDynamicImpl(mkldnn::stream strm) {
|
||||||
|
execute(strm);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline size_t parallel_init(size_t start, size_t nDims, const VectorDims& dims, VectorDims& indexes) {
|
||||||
for (int j = nDims - 1; j >= 0; j--) {
|
for (int j = nDims - 1; j >= 0; j--) {
|
||||||
indexes[j] = start % dims[j];
|
indexes[j] = start % dims[j];
|
||||||
start = start / dims[j];
|
start = start / dims[j];
|
||||||
@ -286,7 +324,7 @@ static inline size_t parallel_init(size_t start, size_t nDims, const SizeVector&
|
|||||||
return start;
|
return start;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void parallel_step(size_t nDims, const SizeVector& dims, SizeVector& indexes) {
|
static inline void parallel_step(size_t nDims, const VectorDims& dims, VectorDims& indexes) {
|
||||||
for (int j = nDims - 1; j >= 0; --j) {
|
for (int j = nDims - 1; j >= 0; --j) {
|
||||||
++indexes[j];
|
++indexes[j];
|
||||||
if (indexes[j] < dims[j])
|
if (indexes[j] < dims[j])
|
||||||
@ -296,17 +334,14 @@ static inline void parallel_step(size_t nDims, const SizeVector& dims, SizeVecto
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MKLDNNPadNode::padConstant() {
|
void MKLDNNPadNode::PadExecutor::padConstant(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
|
||||||
if (padValue == 0) {
|
if (params.attrs.padValue == 0) {
|
||||||
padConstantZero();
|
padConstantZero(srcMemPtr, dstMemPtr);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
|
PadContext ctx { this, srcMemPtr, dstMemPtr };
|
||||||
if (!selectedPrimitiveDescriptor)
|
OV_SWITCH(MKLDNNPlugin, PadConstantEmitter, ctx, params.attrs.prc,
|
||||||
IE_THROW() << "CPU Pad node with name '" << getName() << "' doesn't have primitive descriptors.";
|
|
||||||
InferenceEngine::Precision precision = selectedPrimitiveDescriptor->getConfig().inConfs[0].desc->getPrecision();
|
|
||||||
OV_SWITCH(MKLDNNPlugin, PadConstantEmitter, this, precision,
|
|
||||||
OV_CASE(InferenceEngine::Precision::FP32, float),
|
OV_CASE(InferenceEngine::Precision::FP32, float),
|
||||||
OV_CASE(InferenceEngine::Precision::I32, int32_t),
|
OV_CASE(InferenceEngine::Precision::I32, int32_t),
|
||||||
OV_CASE(InferenceEngine::Precision::BF16, bfloat16_t),
|
OV_CASE(InferenceEngine::Precision::BF16, bfloat16_t),
|
||||||
@ -315,18 +350,18 @@ void MKLDNNPadNode::padConstant() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
void MKLDNNPadNode::padConstantCommon() {
|
void MKLDNNPadNode::PadExecutor::padConstantCommon(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
|
||||||
const T* srcData = reinterpret_cast<const T*>(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
|
const T* srcData = reinterpret_cast<const T*>(srcMemPtr->GetPtr());
|
||||||
T* dstData = reinterpret_cast<T*>(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
|
T* dstData = reinterpret_cast<T*>(dstMemPtr->GetPtr());
|
||||||
const T value = static_cast<T>(padValue);
|
const T value = static_cast<T>(params.attrs.padValue);
|
||||||
|
|
||||||
const size_t beginShift = padsBegin[params.nDimsForWork] * params.shift;
|
const size_t beginShift = params.attrs.padsBegin[params.nDimsForWork] * params.shift;
|
||||||
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
||||||
const size_t endShift = padsEnd[params.nDimsForWork] * params.shift;
|
const size_t endShift = params.attrs.padsEnd[params.nDimsForWork] * params.shift;
|
||||||
|
|
||||||
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
||||||
size_t start = 0, end = 0;
|
size_t start = 0, end = 0;
|
||||||
SizeVector indexes(params.nDimsForWork, 0);
|
VectorDims indexes(params.nDimsForWork, 0);
|
||||||
splitter(params.workAmount, nthr, ithr, start, end);
|
splitter(params.workAmount, nthr, ithr, start, end);
|
||||||
|
|
||||||
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
||||||
@ -336,7 +371,7 @@ void MKLDNNPadNode::padConstantCommon() {
|
|||||||
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
||||||
size_t j = 0;
|
size_t j = 0;
|
||||||
for (; j < params.nDimsForWork; ++j) {
|
for (; j < params.nDimsForWork; ++j) {
|
||||||
if (indexes[j] < padsBegin[j] || indexes[j] >= params.srcODims[j])
|
if (indexes[j] < params.attrs.padsBegin[j] || indexes[j] >= params.srcODims[j])
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -348,10 +383,10 @@ void MKLDNNPadNode::padConstantCommon() {
|
|||||||
|
|
||||||
size_t srcIdx = 0;
|
size_t srcIdx = 0;
|
||||||
for (size_t idx = 0; idx < params.nDimsForWork; ++idx)
|
for (size_t idx = 0; idx < params.nDimsForWork; ++idx)
|
||||||
srcIdx += (indexes[idx] - padsBegin[idx]) * params.srcStrides[idx];
|
srcIdx += (indexes[idx] - params.attrs.padsBegin[idx]) * params.srcStrides[idx];
|
||||||
|
|
||||||
std::fill_n(&dstData[dstIdx], beginShift, value);
|
std::fill_n(&dstData[dstIdx], beginShift, value);
|
||||||
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize * params.sizeData);
|
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize * params.dataSize);
|
||||||
std::fill_n(&dstData[dstIdx + beginShift + copySize], endShift, value);
|
std::fill_n(&dstData[dstIdx + beginShift + copySize], endShift, value);
|
||||||
|
|
||||||
parallel_step(params.nDimsForWork, params.dstDims, indexes);
|
parallel_step(params.nDimsForWork, params.dstDims, indexes);
|
||||||
@ -359,27 +394,28 @@ void MKLDNNPadNode::padConstantCommon() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
void MKLDNNPadNode::padConstantZero() {
|
void MKLDNNPadNode::PadExecutor::padConstantZero(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
|
||||||
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
|
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(srcMemPtr->GetPtr());
|
||||||
uint8_t* dstData = reinterpret_cast<uint8_t*>(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
|
uint8_t* dstData = reinterpret_cast<uint8_t*>(dstMemPtr->GetPtr());
|
||||||
|
|
||||||
const size_t beginShift = padsBegin[params.nDimsForWork] * params.shift;
|
const size_t beginShift = params.attrs.padsBegin[params.nDimsForWork] * params.shift;
|
||||||
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
||||||
const size_t endShift = padsEnd[params.nDimsForWork] * params.shift;
|
const size_t endShift = params.attrs.padsEnd[params.nDimsForWork] * params.shift;
|
||||||
|
|
||||||
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
||||||
size_t start = 0, end = 0;
|
size_t start = 0, end = 0;
|
||||||
SizeVector indexes(params.nDimsForWork, 0);
|
VectorDims indexes(params.nDimsForWork, 0);
|
||||||
splitter(params.workAmount, nthr, ithr, start, end);
|
splitter(params.workAmount, nthr, ithr, start, end);
|
||||||
|
|
||||||
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
||||||
size_t dstIdx = 0;
|
size_t dstIdx = 0;
|
||||||
getDstIdx(indexes, dstIdx);
|
getDstIdx(indexes, dstIdx);
|
||||||
|
dstIdx *= params.dataSize;
|
||||||
|
|
||||||
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
||||||
size_t j = 0;
|
size_t j = 0;
|
||||||
for (; j < params.nDimsForWork; ++j) {
|
for (; j < params.nDimsForWork; ++j) {
|
||||||
if (indexes[j] < padsBegin[j] || indexes[j] >= params.srcODims[j])
|
if (indexes[j] < params.attrs.padsBegin[j] || indexes[j] >= params.srcODims[j])
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -391,8 +427,8 @@ void MKLDNNPadNode::padConstantZero() {
|
|||||||
|
|
||||||
size_t srcIdx = 0;
|
size_t srcIdx = 0;
|
||||||
for (size_t idx = 0; idx < params.nDimsForWork; ++idx)
|
for (size_t idx = 0; idx < params.nDimsForWork; ++idx)
|
||||||
srcIdx += (indexes[idx] - padsBegin[idx]) * params.srcStrides[idx];
|
srcIdx += (indexes[idx] - params.attrs.padsBegin[idx]) * params.srcStrides[idx];
|
||||||
srcIdx *= params.sizeData;
|
srcIdx *= params.dataSize;
|
||||||
|
|
||||||
memset(&dstData[dstIdx], 0, beginShift);
|
memset(&dstData[dstIdx], 0, beginShift);
|
||||||
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize);
|
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize);
|
||||||
@ -403,37 +439,38 @@ void MKLDNNPadNode::padConstantZero() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
void MKLDNNPadNode::padEdge() {
|
void MKLDNNPadNode::PadExecutor::padEdge(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr) {
|
||||||
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
|
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(srcMemPtr->GetPtr());
|
||||||
uint8_t* dstData = reinterpret_cast<uint8_t*>(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
|
uint8_t* dstData = reinterpret_cast<uint8_t*>(dstMemPtr->GetPtr());
|
||||||
|
|
||||||
const size_t beginShift = padsBegin[params.nDimsForWork] * params.shift;
|
const size_t beginShift = params.attrs.padsBegin[params.nDimsForWork] * params.shift;
|
||||||
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
const size_t copySize = params.srcDims[params.nDimsForWork] * params.shift;
|
||||||
|
|
||||||
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
||||||
size_t start = 0, end = 0;
|
size_t start = 0, end = 0;
|
||||||
SizeVector indexes(params.nDimsForWork, 0);
|
VectorDims indexes(params.nDimsForWork, 0);
|
||||||
splitter(params.workAmount, nthr, ithr, start, end);
|
splitter(params.workAmount, nthr, ithr, start, end);
|
||||||
|
|
||||||
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
||||||
size_t dstIdx = 0;
|
size_t dstIdx = 0;
|
||||||
getDstIdx(indexes, dstIdx);
|
getDstIdx(indexes, dstIdx);
|
||||||
|
dstIdx *= params.dataSize;
|
||||||
|
|
||||||
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
||||||
size_t srcIdx = 0;
|
size_t srcIdx = 0;
|
||||||
for (size_t idx = 0; idx < params.nDimsForWork; ++idx) {
|
for (size_t idx = 0; idx < params.nDimsForWork; ++idx) {
|
||||||
size_t shift = (indexes[idx] < padsBegin[idx]) ? 0 :
|
size_t shift = (indexes[idx] < params.attrs.padsBegin[idx]) ? 0 :
|
||||||
((indexes[idx] >= params.srcODims[idx]) ? (params.srcDims[idx] - 1) : (indexes[idx] - padsBegin[idx]));
|
((indexes[idx] >= params.srcODims[idx]) ? (params.srcDims[idx] - 1) : (indexes[idx] - params.attrs.padsBegin[idx]));
|
||||||
srcIdx += shift * params.srcStrides[idx];
|
srcIdx += shift * params.srcStrides[idx];
|
||||||
}
|
}
|
||||||
srcIdx *= params.sizeData;
|
srcIdx *= params.dataSize;
|
||||||
|
|
||||||
for (size_t i = 0; i < padsBegin[params.nDimsForWork]; ++i)
|
for (size_t i = 0; i < params.attrs.padsBegin[params.nDimsForWork]; ++i)
|
||||||
cpu_memcpy(&dstData[dstIdx + i * params.shift], &srcData[srcIdx], params.shift);
|
cpu_memcpy(&dstData[dstIdx + i * params.shift], &srcData[srcIdx], params.shift);
|
||||||
|
|
||||||
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize);
|
cpu_memcpy(&dstData[dstIdx + beginShift], &srcData[srcIdx], copySize);
|
||||||
|
|
||||||
for (size_t i = 0; i < padsEnd[params.nDimsForWork]; ++i)
|
for (size_t i = 0; i < params.attrs.padsEnd[params.nDimsForWork]; ++i)
|
||||||
cpu_memcpy(&dstData[dstIdx + beginShift + copySize + i * params.shift],
|
cpu_memcpy(&dstData[dstIdx + beginShift + copySize + i * params.shift],
|
||||||
&srcData[srcIdx + (params.srcDims[params.nDimsForWork] - 1) * params.shift], params.shift);
|
&srcData[srcIdx + (params.srcDims[params.nDimsForWork] - 1) * params.shift], params.shift);
|
||||||
|
|
||||||
@ -442,38 +479,40 @@ void MKLDNNPadNode::padEdge() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
void MKLDNNPadNode::padReflectOrSymmetric(const bool isSymmetric) {
|
void MKLDNNPadNode::PadExecutor::padReflectOrSymmetric(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr, const bool isSymmetric) {
|
||||||
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(this->getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
|
const uint8_t* srcData = reinterpret_cast<const uint8_t*>(srcMemPtr->GetPtr());
|
||||||
uint8_t* dstData = reinterpret_cast<uint8_t*>(this->getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
|
uint8_t* dstData = reinterpret_cast<uint8_t*>(dstMemPtr->GetPtr());
|
||||||
size_t shift = isSymmetric ? 1 : 0;
|
size_t shift = isSymmetric ? 1 : 0;
|
||||||
|
|
||||||
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
parallel_nt(params.nThreads, [&](const int ithr, const int nthr) {
|
||||||
size_t start = 0, end = 0;
|
size_t start = 0, end = 0;
|
||||||
SizeVector indexes(params.nDimsForWork, 0);
|
VectorDims indexes(params.nDimsForWork, 0);
|
||||||
splitter(params.workAmount, nthr, ithr, start, end);
|
splitter(params.workAmount, nthr, ithr, start, end);
|
||||||
|
|
||||||
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
parallel_init(start, params.nDimsForWork, params.dstDims, indexes);
|
||||||
size_t dstIdx = 0;
|
size_t dstIdx = 0;
|
||||||
getDstIdx(indexes, dstIdx);
|
getDstIdx(indexes, dstIdx);
|
||||||
|
dstIdx *= params.dataSize;
|
||||||
|
|
||||||
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
|
||||||
size_t srcIdx = 0;
|
size_t srcIdx = 0;
|
||||||
for (size_t i = 0; i < params.nDimsForWork; ++i) {
|
for (size_t i = 0; i < params.nDimsForWork; ++i) {
|
||||||
size_t idx = (indexes[i] < padsBegin[i]) ? (padsBegin[i] - indexes[i] - shift) :
|
size_t idx = (indexes[i] < params.attrs.padsBegin[i]) ? (params.attrs.padsBegin[i] - indexes[i] - shift) :
|
||||||
((indexes[i] >= params.srcODims[i]) ? (params.srcDimsForReflectOrSymmetric[i] - indexes[i]) : (indexes[i] - padsBegin[i]));
|
((indexes[i] >= params.srcODims[i]) ? (params.srcDimsForReflectOrSymmetric[i] - indexes[i]) :
|
||||||
|
(indexes[i] - params.attrs.padsBegin[i]));
|
||||||
srcIdx += idx * params.srcStrides[i];
|
srcIdx += idx * params.srcStrides[i];
|
||||||
}
|
}
|
||||||
srcIdx *= params.sizeData;
|
srcIdx *= params.dataSize;
|
||||||
|
|
||||||
for (size_t i = 0; i < padsBegin[params.nDimsForWork]; ++i)
|
for (size_t i = 0; i < params.attrs.padsBegin[params.nDimsForWork]; ++i)
|
||||||
cpu_memcpy(&dstData[dstIdx + i * params.shift],
|
cpu_memcpy(&dstData[dstIdx + i * params.shift],
|
||||||
&srcData[srcIdx + (padsBegin[params.nDimsForWork] - shift - i) * params.shift], params.shift);
|
&srcData[srcIdx + (params.attrs.padsBegin[params.nDimsForWork] - shift - i) * params.shift], params.shift);
|
||||||
|
|
||||||
cpu_memcpy(&dstData[dstIdx + padsBegin[params.nDimsForWork] * params.shift], &srcData[srcIdx],
|
cpu_memcpy(&dstData[dstIdx + params.attrs.padsBegin[params.nDimsForWork] * params.shift], &srcData[srcIdx],
|
||||||
params.srcDims[params.nDimsForWork] * params.shift);
|
params.srcDims[params.nDimsForWork] * params.shift);
|
||||||
|
|
||||||
size_t srcShift = (params.srcDimsForReflectOrSymmetric[params.nDimsForWork] - params.srcODims[params.nDimsForWork]) * params.shift;
|
size_t srcShift = (params.srcDimsForReflectOrSymmetric[params.nDimsForWork] - params.srcODims[params.nDimsForWork]) * params.shift;
|
||||||
for (size_t i = 0; i < padsEnd[params.nDimsForWork]; ++i)
|
for (size_t i = 0; i < params.attrs.padsEnd[params.nDimsForWork]; ++i)
|
||||||
cpu_memcpy(&dstData[dstIdx + (params.srcODims[params.nDimsForWork] + i) * params.shift],
|
cpu_memcpy(&dstData[dstIdx + (params.srcODims[params.nDimsForWork] + i) * params.shift],
|
||||||
&srcData[srcIdx + srcShift - i * params.shift], params.shift);
|
&srcData[srcIdx + srcShift - i * params.shift], params.shift);
|
||||||
|
|
||||||
@ -482,10 +521,9 @@ void MKLDNNPadNode::padReflectOrSymmetric(const bool isSymmetric) {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void MKLDNNPadNode::getDstIdx(const InferenceEngine::SizeVector& indexes, size_t& dstIdx) const {
|
inline void MKLDNNPadNode::PadExecutor::getDstIdx(const VectorDims& indexes, size_t& dstIdx) const {
|
||||||
for (size_t i = 0; i < params.nDimsForWork; ++i)
|
for (size_t i = 0; i < params.nDimsForWork; ++i)
|
||||||
dstIdx += indexes[i] * params.dstStrides[i];
|
dstIdx += indexes[i] * params.dstStrides[i];
|
||||||
dstIdx *= (padMode == CONSTANT && padValue != 0) ? 1 : params.sizeData;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool MKLDNNPadNode::created() const {
|
bool MKLDNNPadNode::created() const {
|
||||||
|
@ -14,13 +14,17 @@ class MKLDNNPadNode : public MKLDNNNode {
|
|||||||
public:
|
public:
|
||||||
MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
|
MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
|
||||||
|
|
||||||
|
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
|
||||||
void getSupportedDescriptors() override;
|
void getSupportedDescriptors() override;
|
||||||
void initSupportedPrimitiveDescriptors() override;
|
void initSupportedPrimitiveDescriptors() override;
|
||||||
void createPrimitive() override;
|
void createPrimitive() override;
|
||||||
void execute(mkldnn::stream strm) override;
|
void execute(mkldnn::stream strm) override;
|
||||||
bool created() const override;
|
bool created() const override;
|
||||||
|
|
||||||
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
|
void prepareParams() override;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
void executeDynamicImpl(mkldnn::stream strm) override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
enum PadMode {
|
enum PadMode {
|
||||||
@ -30,48 +34,70 @@ private:
|
|||||||
SYMMETRIC = 3
|
SYMMETRIC = 3
|
||||||
};
|
};
|
||||||
|
|
||||||
void padConstant();
|
struct PadAttrs {
|
||||||
template<typename T> void padConstantCommon();
|
PadMode padMode = CONSTANT;
|
||||||
void padConstantZero();
|
float padValue = 0.f;
|
||||||
void padEdge();
|
std::vector<unsigned int> padsBegin;
|
||||||
void padReflectOrSymmetric(const bool isSymmetric = false);
|
std::vector<unsigned int> padsEnd;
|
||||||
|
int beginPadIdx = 0;
|
||||||
|
int endPadIdx = 0;
|
||||||
|
InferenceEngine::Precision prc;
|
||||||
|
} attrs;
|
||||||
|
|
||||||
inline void getDstIdx(const InferenceEngine::SizeVector& indexes, size_t& dstIdx) const;
|
struct PadExecutor {
|
||||||
|
PadExecutor(const PadAttrs& params, const VectorDims& srcDims, const VectorDims& dstDims);
|
||||||
|
void exec(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr);
|
||||||
|
~PadExecutor() = default;
|
||||||
|
|
||||||
PadMode padMode = CONSTANT;
|
private:
|
||||||
float padValue = 0.f;
|
void padConstant(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr);
|
||||||
std::vector<unsigned int> padsBegin;
|
template<typename T> void padConstantCommon(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr);
|
||||||
std::vector<unsigned int> padsEnd;
|
void padConstantZero(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr);
|
||||||
|
void padEdge(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr);
|
||||||
|
void padReflectOrSymmetric(MKLDNNMemoryPtr& srcMemPtr, MKLDNNMemoryPtr& dstMemPtr, const bool isSymmetric = false);
|
||||||
|
|
||||||
struct {
|
inline void getDstIdx(const VectorDims& indexes, size_t& dstIdx) const;
|
||||||
InferenceEngine::SizeVector srcDims;
|
|
||||||
InferenceEngine::SizeVector dstDims;
|
|
||||||
InferenceEngine::SizeVector srcODims;
|
|
||||||
InferenceEngine::SizeVector srcStrides;
|
|
||||||
InferenceEngine::SizeVector dstStrides;
|
|
||||||
InferenceEngine::SizeVector srcDimsForReflectOrSymmetric;
|
|
||||||
int nThreads = 0;
|
|
||||||
size_t nDimsForWork = 0lu;
|
|
||||||
size_t workAmount = 0lu;
|
|
||||||
size_t lastDstDim = 1lu;
|
|
||||||
size_t shift = 0lu;
|
|
||||||
uint8_t sizeData = 1;
|
|
||||||
} params;
|
|
||||||
|
|
||||||
template<typename T>
|
struct PadContext {
|
||||||
struct PadConstantEmitter {
|
PadExecutor* executor;
|
||||||
void operator()(MKLDNNPadNode* node) {
|
MKLDNNMemoryPtr srcMemPtr;
|
||||||
node->padConstantCommon<T>();
|
MKLDNNMemoryPtr dstMemPtr;
|
||||||
}
|
};
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
struct PadConstantEmitter {
|
||||||
|
void operator()(PadContext& ctx) {
|
||||||
|
ctx.executor->padConstantCommon<T>(ctx.srcMemPtr, ctx.dstMemPtr);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct {
|
||||||
|
PadAttrs attrs;
|
||||||
|
VectorDims srcDims;
|
||||||
|
VectorDims dstDims;
|
||||||
|
VectorDims srcODims;
|
||||||
|
VectorDims srcStrides;
|
||||||
|
VectorDims dstStrides;
|
||||||
|
VectorDims srcDimsForReflectOrSymmetric;
|
||||||
|
int nThreads = 0;
|
||||||
|
size_t nDimsForWork = 0lu;
|
||||||
|
size_t workAmount = 0lu;
|
||||||
|
size_t lastDstDim = 1lu;
|
||||||
|
size_t shift = 0lu;
|
||||||
|
size_t dataSize = 1lu;
|
||||||
|
PadMode padMode;
|
||||||
|
} params;
|
||||||
};
|
};
|
||||||
|
|
||||||
std::string errorPrefix;
|
static constexpr size_t DATA_ID = 0lu;
|
||||||
static const size_t DATA_ID = 0;
|
static constexpr size_t PADS_BEGIN_ID = 1lu;
|
||||||
static const size_t PADS_BEGIN_ID = 1;
|
static constexpr size_t PADS_END_ID = 2lu;
|
||||||
static const size_t PADS_END_ID = 2;
|
static constexpr size_t PAD_VALUE_ID = 3lu;
|
||||||
static const size_t PAD_VALUE_ID = 3;
|
|
||||||
|
|
||||||
bool isPadValueSpecified = false;
|
bool isPadValueSpecified = false;
|
||||||
|
|
||||||
|
using executorPtr = std::shared_ptr<PadExecutor>;
|
||||||
|
executorPtr execPtr = nullptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace MKLDNNPlugin
|
} // namespace MKLDNNPlugin
|
||||||
|
@ -4,57 +4,75 @@
|
|||||||
|
|
||||||
#include <shared_test_classes/single_layer/pad.hpp>
|
#include <shared_test_classes/single_layer/pad.hpp>
|
||||||
#include "test_utils/cpu_test_utils.hpp"
|
#include "test_utils/cpu_test_utils.hpp"
|
||||||
|
#include "shared_test_classes/base/ov_subgraph.hpp"
|
||||||
|
|
||||||
using namespace InferenceEngine;
|
using namespace InferenceEngine;
|
||||||
using namespace CPUTestUtils;
|
using namespace CPUTestUtils;
|
||||||
|
using namespace ov;
|
||||||
|
using namespace test;
|
||||||
|
|
||||||
namespace CPULayerTestsDefinitions {
|
namespace CPULayerTestsDefinitions {
|
||||||
|
|
||||||
typedef std::tuple<
|
using PadLayerCPUTestParamSet = std::tuple<
|
||||||
LayerTestsDefinitions::padLayerTestParamsSet,
|
InputShape, // Input shape
|
||||||
|
ElementType, // Input element type
|
||||||
|
std::vector<int64_t>, // padsBegin
|
||||||
|
std::vector<int64_t>, // padsEnd
|
||||||
|
float, // argPadValue
|
||||||
|
ngraph::helpers::PadMode, // padMode
|
||||||
CPUSpecificParams
|
CPUSpecificParams
|
||||||
> padLayerCPUTestParamsSet;
|
>;
|
||||||
|
|
||||||
class PadLayerCPUTest : public testing::WithParamInterface<padLayerCPUTestParamsSet>,
|
class PadLayerCPUTest : public testing::WithParamInterface<PadLayerCPUTestParamSet>,
|
||||||
virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase {
|
virtual public SubgraphBaseTest, public CPUTestsBase {
|
||||||
public:
|
public:
|
||||||
static std::string getTestCaseName(testing::TestParamInfo<padLayerCPUTestParamsSet> obj) {
|
static std::string getTestCaseName(testing::TestParamInfo<PadLayerCPUTestParamSet> obj) {
|
||||||
LayerTestsDefinitions::padLayerTestParamsSet basicParamsSet;
|
InputShape shapes;
|
||||||
|
ElementType elementType;
|
||||||
|
std::vector<int64_t> padsBegin, padsEnd;
|
||||||
|
ngraph::helpers::PadMode padMode;
|
||||||
|
float argPadValue;
|
||||||
CPUSpecificParams cpuParams;
|
CPUSpecificParams cpuParams;
|
||||||
std::tie(basicParamsSet, cpuParams) = obj.param;
|
std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = obj.param;
|
||||||
|
|
||||||
std::ostringstream result;
|
std::ostringstream results;
|
||||||
result << LayerTestsDefinitions::PadLayerTest::getTestCaseName(testing::TestParamInfo<LayerTestsDefinitions::padLayerTestParamsSet>(
|
results << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
|
||||||
basicParamsSet, 0));
|
results << "TS=";
|
||||||
|
for (const auto& item : shapes.second) {
|
||||||
|
results << CommonTestUtils::vec2str(item) << "_";
|
||||||
|
}
|
||||||
|
results << "Prc=" << elementType << "_";
|
||||||
|
results << "padsBegin=" << CommonTestUtils::vec2str(padsBegin) << "_";
|
||||||
|
results << "padsEnd=" << CommonTestUtils::vec2str(padsEnd) << "_";
|
||||||
|
if (padMode == ngraph::helpers::PadMode::CONSTANT) {
|
||||||
|
results << "Value=" << argPadValue << "_";
|
||||||
|
}
|
||||||
|
results << "PadMode=" << padMode << "_";
|
||||||
|
results << CPUTestsBase::getTestCaseName(cpuParams);
|
||||||
|
|
||||||
result << CPUTestsBase::getTestCaseName(cpuParams);
|
return results.str();
|
||||||
|
|
||||||
return result.str();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
void SetUp() override {
|
void SetUp() override {
|
||||||
LayerTestsDefinitions::padLayerTestParamsSet basicParamsSet;
|
InputShape shapes;
|
||||||
|
ElementType elementType;
|
||||||
|
std::vector<int64_t> padsBegin, padsEnd;
|
||||||
|
ngraph::helpers::PadMode padMode;
|
||||||
|
float argPadValue;
|
||||||
CPUSpecificParams cpuParams;
|
CPUSpecificParams cpuParams;
|
||||||
std::tie(basicParamsSet, cpuParams) = this->GetParam();
|
std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = this->GetParam();
|
||||||
|
|
||||||
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
|
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
|
||||||
|
if (selectedType.empty()) {
|
||||||
|
selectedType = getPrimitiveType();
|
||||||
|
}
|
||||||
|
selectedType = selectedType + "_" + InferenceEngine::details::convertPrecision(inType).name();
|
||||||
|
targetDevice = CommonTestUtils::DEVICE_CPU;
|
||||||
|
init_input_shapes({shapes});
|
||||||
|
|
||||||
InferenceEngine::SizeVector inputShape;
|
auto params = ngraph::builder::makeDynamicParams(elementType, inputDynamicShapes);
|
||||||
std::vector<int64_t> padsBegin, padsEnd;
|
auto pad = ngraph::builder::makePad(params[0], padsBegin, padsEnd, argPadValue, padMode);
|
||||||
float argPadValue;
|
|
||||||
ngraph::helpers::PadMode padMode;
|
|
||||||
InferenceEngine::Precision netPrecision;
|
|
||||||
std::tie(padsBegin, padsEnd, argPadValue, padMode, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) =
|
|
||||||
basicParamsSet;
|
|
||||||
|
|
||||||
inPrc = outPrc = netPrecision;
|
|
||||||
selectedType = std::string("ref_") + netPrecision.name();
|
|
||||||
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
|
|
||||||
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
|
|
||||||
auto paramOuts = ngraph::helpers::convert2OutputVector(
|
|
||||||
ngraph::helpers::castOps2Nodes<ngraph::opset3::Parameter>(params));
|
|
||||||
auto pad = ngraph::builder::makePad(paramOuts[0], padsBegin, padsEnd, argPadValue, padMode);
|
|
||||||
pad->get_rt_info() = getCPUInfo();
|
pad->get_rt_info() = getCPUInfo();
|
||||||
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pad)};
|
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pad)};
|
||||||
function = std::make_shared<ngraph::Function>(results, params, "pad");
|
function = std::make_shared<ngraph::Function>(results, params, "pad");
|
||||||
@ -64,8 +82,10 @@ protected:
|
|||||||
TEST_P(PadLayerCPUTest, CompareWithRefs) {
|
TEST_P(PadLayerCPUTest, CompareWithRefs) {
|
||||||
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
SKIP_IF_CURRENT_TEST_IS_DISABLED()
|
||||||
|
|
||||||
Run();
|
run();
|
||||||
CheckPluginRelatedResults(executableNetwork, "Pad");
|
|
||||||
|
// TODO: need to uncomment when this method will be updated
|
||||||
|
//CheckPluginRelatedResults(executableNetwork, "Pad");
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
@ -80,11 +100,13 @@ const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}};
|
|||||||
const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}};
|
const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}};
|
||||||
const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}};
|
const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}};
|
||||||
|
|
||||||
|
const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}};
|
||||||
|
const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}};
|
||||||
|
|
||||||
const std::vector<InferenceEngine::Precision> inputPrecisions = {
|
const std::vector<ElementType> inputPrecisions = {
|
||||||
InferenceEngine::Precision::FP32,
|
ElementType::f32,
|
||||||
InferenceEngine::Precision::BF16,
|
ElementType::bf16,
|
||||||
InferenceEngine::Precision::I8
|
ElementType::i8
|
||||||
};
|
};
|
||||||
|
|
||||||
const std::vector<float> argPadValue = {0.f, 1.f, 2.5f, -1.f};
|
const std::vector<float> argPadValue = {0.f, 1.f, 2.5f, -1.f};
|
||||||
@ -95,6 +117,8 @@ const std::vector<ngraph::helpers::PadMode> padMode = {
|
|||||||
ngraph::helpers::PadMode::SYMMETRIC
|
ngraph::helpers::PadMode::SYMMETRIC
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* *======================* Static Shapes Tests 4D *======================* */
|
||||||
|
|
||||||
const std::vector<std::vector<int64_t>> padsBegin4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 16, 1, 0}, {0, 0, 2, 0}};
|
const std::vector<std::vector<int64_t>> padsBegin4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 1, 3}, {2, 16, 1, 0}, {0, 0, 2, 0}};
|
||||||
const std::vector<std::vector<int64_t>> padsEnd4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 32, 2, 0}};
|
const std::vector<std::vector<int64_t>> padsEnd4DConstBlocked = {{0, 0, 0, 0}, {0, 0, 2, 1}, {2, 0, 0, 1}, {1, 32, 2, 0}};
|
||||||
|
|
||||||
@ -109,95 +133,142 @@ const std::vector<CPUSpecificParams> CPUParams4DBlocked = {
|
|||||||
cpuParams_nChw8c,
|
cpuParams_nChw8c,
|
||||||
};
|
};
|
||||||
|
|
||||||
const auto pad4DConstParamsBlocked = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin4DConstBlocked),
|
|
||||||
testing::ValuesIn(padsEnd4DConstBlocked),
|
|
||||||
testing::ValuesIn(argPadValue),
|
|
||||||
testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad4DConstBlocked,
|
smoke_CPUPad4DConstBlocked,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad4DConstParamsBlocked,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4DConstBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd4DConstBlocked),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
::testing::ValuesIn(CPUParams4DBlocked)),
|
::testing::ValuesIn(CPUParams4DBlocked)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|
||||||
const auto pad4DConstParams = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin4D),
|
|
||||||
testing::ValuesIn(padsEnd4D),
|
|
||||||
testing::ValuesIn(argPadValue),
|
|
||||||
testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad4DConst,
|
smoke_CPUPad4DConst,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad4DConstParams,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4D),
|
||||||
|
::testing::ValuesIn(padsEnd4D),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
::testing::Values(cpuParams_nhwc)),
|
::testing::Values(cpuParams_nhwc)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
const auto pad4DParamsBlocked = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin4DBlocked),
|
|
||||||
testing::ValuesIn(padsEnd4DBlocked),
|
|
||||||
testing::Values(0),
|
|
||||||
testing::ValuesIn(padMode),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 10, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad4DBlocked,
|
smoke_CPUPad4DBlocked,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad4DParamsBlocked,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4DBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd4DBlocked),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
::testing::ValuesIn(CPUParams4DBlocked)),
|
::testing::ValuesIn(CPUParams4DBlocked)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
const auto pad4DParams = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin4D),
|
|
||||||
testing::ValuesIn(padsEnd4D),
|
|
||||||
testing::Values(0),
|
|
||||||
testing::ValuesIn(padMode),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 10, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad4D,
|
smoke_CPUPad4D,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad4DParams,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4DBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd4DBlocked),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
::testing::Values(cpuParams_nhwc)),
|
::testing::Values(cpuParams_nhwc)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
|
/* *======================* *=====================* *======================* */
|
||||||
|
|
||||||
|
/* *======================* Dynamic Shapes Tests 4D *======================* */
|
||||||
|
|
||||||
|
const std::vector<InputShape> inputShapesDynamic4D = {
|
||||||
|
{{-1, -1, -1, -1}, // dynamic
|
||||||
|
{{5, 36, 5, 5}, {3, 16, 10, 5}, {3, 24, 10, 10}}}, // target
|
||||||
|
|
||||||
|
{{-1, 32, -1, -1}, // dynamic
|
||||||
|
{{5, 32, 5, 5}, {5, 32, 5, 8}, {3, 32, 8, 8}}}, // target
|
||||||
|
|
||||||
|
{{{1, 5}, {16, 32}, {1, 16}, {1, 16}}, // dynamic
|
||||||
|
{{3, 16, 5, 5}, {5, 24, 5, 8}, {3, 32, 8, 8}}}, // target
|
||||||
|
};
|
||||||
|
|
||||||
|
const std::vector<CPUSpecificParams> CPUParams4DDynamic = {
|
||||||
|
cpuParams_nhwc,
|
||||||
|
cpuParams_nchw
|
||||||
|
};
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic4DConst,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::ValuesIn(inputShapesDynamic4D),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4D),
|
||||||
|
::testing::ValuesIn(padsEnd4D),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
|
::testing::ValuesIn(CPUParams4DDynamic)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic4DConstBlocked,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::Values(inputShapesDynamic4D[1]),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4DConstBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd4DConstBlocked),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
|
::testing::ValuesIn(CPUParams4DBlocked)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic4D,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::ValuesIn(inputShapesDynamic4D),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4D),
|
||||||
|
::testing::ValuesIn(padsEnd4D),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
|
::testing::ValuesIn(CPUParams4DDynamic)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic4DBlocked,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::Values(inputShapesDynamic4D[1]),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin4DBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd4DBlocked),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
|
::testing::ValuesIn(CPUParams4DBlocked)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
/* *======================* *=====================* *======================* */
|
||||||
|
|
||||||
|
/* *======================* Static Shapes Tests 5D *======================* */
|
||||||
|
|
||||||
const std::vector<std::vector<int64_t>> padsBegin5DConstBlocked = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 32, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
|
const std::vector<std::vector<int64_t>> padsBegin5DConstBlocked = {{0, 0, 0, 0, 0}, {0, 0, 1, 1, 0}, {2, 32, 1, 1, 0}, {0, 0, 1, 3, 1}, {0, 0, 0, 1, 0}};
|
||||||
const std::vector<std::vector<int64_t>> padsEnd5DConstBlocked = {{0, 0, 0, 0, 0}, {1, 16, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
|
const std::vector<std::vector<int64_t>> padsEnd5DConstBlocked = {{0, 0, 0, 0, 0}, {1, 16, 1, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, 1, 1}, {0, 0, 1, 0, 1}};
|
||||||
|
|
||||||
@ -212,94 +283,139 @@ const std::vector<CPUSpecificParams> CPUParams5DBlocked = {
|
|||||||
cpuParams_nCdhw8c,
|
cpuParams_nCdhw8c,
|
||||||
};
|
};
|
||||||
|
|
||||||
const auto pad5DConstParamsBlocked = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin5DConstBlocked),
|
|
||||||
testing::ValuesIn(padsEnd5DConstBlocked),
|
|
||||||
testing::ValuesIn(argPadValue),
|
|
||||||
testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 5, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad5DConstBlocked,
|
smoke_CPUPad5DConstBlocked,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad5DConstParamsBlocked,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5DConstBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd5DConstBlocked),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
::testing::ValuesIn(CPUParams5DBlocked)),
|
::testing::ValuesIn(CPUParams5DBlocked)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
const auto pad5DConstParams = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin5D),
|
|
||||||
testing::ValuesIn(padsEnd5D),
|
|
||||||
testing::ValuesIn(argPadValue),
|
|
||||||
testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 10, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad5DConst,
|
smoke_CPUPad5DConst,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad5DConstParams,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5D),
|
||||||
|
::testing::ValuesIn(padsEnd5D),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
::testing::Values(cpuParams_ndhwc)),
|
::testing::Values(cpuParams_ndhwc)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
const auto pad5DParamsBlocked = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin5DBlocked),
|
|
||||||
testing::ValuesIn(padsEnd5DBlocked),
|
|
||||||
testing::Values(0),
|
|
||||||
testing::ValuesIn(padMode),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 5, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad5DBlocked,
|
smoke_CPUPad5DBlocked,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad5DParamsBlocked,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5DBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd5DBlocked),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
::testing::ValuesIn(CPUParams5DBlocked)),
|
::testing::ValuesIn(CPUParams5DBlocked)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
const auto pad5DParams = testing::Combine(
|
|
||||||
testing::ValuesIn(padsBegin5D),
|
|
||||||
testing::ValuesIn(padsEnd5D),
|
|
||||||
testing::Values(0),
|
|
||||||
testing::ValuesIn(padMode),
|
|
||||||
testing::ValuesIn(inputPrecisions),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Precision::UNSPECIFIED),
|
|
||||||
testing::Values(InferenceEngine::Layout::ANY),
|
|
||||||
testing::Values(std::vector<size_t>{3, 16, 5, 5, 5}),
|
|
||||||
testing::Values(CommonTestUtils::DEVICE_CPU)
|
|
||||||
);
|
|
||||||
|
|
||||||
INSTANTIATE_TEST_SUITE_P(
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
smoke_CPUPad5D,
|
smoke_CPUPad5D,
|
||||||
PadLayerCPUTest,
|
PadLayerCPUTest,
|
||||||
::testing::Combine(
|
::testing::Combine(
|
||||||
pad5DParams,
|
::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5D),
|
||||||
|
::testing::ValuesIn(padsEnd5D),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
::testing::Values(cpuParams_ndhwc)),
|
::testing::Values(cpuParams_ndhwc)),
|
||||||
PadLayerCPUTest::getTestCaseName
|
PadLayerCPUTest::getTestCaseName
|
||||||
);
|
);
|
||||||
|
|
||||||
|
/* *======================* *=====================* *======================* */
|
||||||
|
|
||||||
|
/* *======================* Dynamic Shapes Tests 5D *======================* */
|
||||||
|
|
||||||
|
const std::vector<InputShape> inputShapesDynamic5D = {
|
||||||
|
{{-1, -1, -1, -1, -1}, // dynamic
|
||||||
|
{{5, 36, 5, 5, 5}, {3, 16, 8, 5, 7}, {3, 24, 10, 10, 10}}}, // target
|
||||||
|
|
||||||
|
{{-1, 32, -1, -1, -1}, // dynamic
|
||||||
|
{{5, 32, 5, 5, 5}, {3, 32, 8, 5, 7}, {3, 32, 10, 10, 10}}}, // target
|
||||||
|
|
||||||
|
{{{1, 5}, {16, 32}, {1, 16}, {1, 16}, {1, 16}}, // dynamic
|
||||||
|
{{3, 16, 5, 5, 5}, {3, 24, 8, 5, 7}, {4, 32, 10, 10, 10}}}, // target
|
||||||
|
};
|
||||||
|
|
||||||
|
const std::vector<CPUSpecificParams> CPUParams5DDynamic = {
|
||||||
|
cpuParams_ndhwc,
|
||||||
|
cpuParams_ncdhw
|
||||||
|
};
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic5DConst,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::ValuesIn(inputShapesDynamic5D),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5D),
|
||||||
|
::testing::ValuesIn(padsEnd5D),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
|
::testing::ValuesIn(CPUParams5DDynamic)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic5DConstBlocked,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::Values(inputShapesDynamic5D[1]),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5DConstBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd5DConstBlocked),
|
||||||
|
::testing::ValuesIn(argPadValue),
|
||||||
|
::testing::Values(ngraph::helpers::PadMode::CONSTANT),
|
||||||
|
::testing::ValuesIn(CPUParams5DBlocked)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic5D,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::ValuesIn(inputShapesDynamic5D),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5D),
|
||||||
|
::testing::ValuesIn(padsEnd5D),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
|
::testing::ValuesIn(CPUParams5DDynamic)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
INSTANTIATE_TEST_SUITE_P(
|
||||||
|
smoke_CPUPadDynamic5DBlocked,
|
||||||
|
PadLayerCPUTest,
|
||||||
|
::testing::Combine(
|
||||||
|
::testing::Values(inputShapesDynamic5D[1]),
|
||||||
|
::testing::ValuesIn(inputPrecisions),
|
||||||
|
::testing::ValuesIn(padsBegin5DBlocked),
|
||||||
|
::testing::ValuesIn(padsEnd5DBlocked),
|
||||||
|
::testing::Values(0),
|
||||||
|
::testing::ValuesIn(padMode),
|
||||||
|
::testing::ValuesIn(CPUParams5DBlocked)),
|
||||||
|
PadLayerCPUTest::getTestCaseName
|
||||||
|
);
|
||||||
|
|
||||||
|
/* *======================* *=====================* *======================* */
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
} // namespace CPULayerTestsDefinitions
|
} // namespace CPULayerTestsDefinitions
|
||||||
|
@ -159,6 +159,18 @@ void op::v1::Pad::validate_and_infer_types() {
|
|||||||
"of at least 2 at each "
|
"of at least 2 at each "
|
||||||
"spatial axis.");
|
"spatial axis.");
|
||||||
}
|
}
|
||||||
|
NODE_VALIDATION_CHECK(
|
||||||
|
this,
|
||||||
|
m_pad_mode != op::PadMode::REFLECT || (pads_begin_coord[i] < arg_shape[i].get_length() &&
|
||||||
|
pads_end_coord[i] < arg_shape[i].get_length()),
|
||||||
|
"REFLECT padding mode requires that 'pads_begin[D]' and 'pads_end[D]' "
|
||||||
|
"must be not greater than 'data_shape[D] - 1'.");
|
||||||
|
NODE_VALIDATION_CHECK(
|
||||||
|
this,
|
||||||
|
m_pad_mode != op::PadMode::SYMMETRIC || (pads_begin_coord[i] <= arg_shape[i].get_length() &&
|
||||||
|
pads_end_coord[i] <= arg_shape[i].get_length()),
|
||||||
|
"SYMMETRIC padding mode requires that 'pads_begin[D]' and 'pads_end[D]' "
|
||||||
|
"must be not greater than 'data_shape[D]'.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
set_output_type(0, get_input_element_type(0), result_dims);
|
set_output_type(0, get_input_element_type(0), result_dims);
|
||||||
|
Loading…
Reference in New Issue
Block a user