[CPU] Enable pad with runtime begin/end/value (#15146)
Parent: a1a6ba911a · Commit: 166566a777
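This change teaches the CPU Pad node to accept pads_begin, pads_end, and pad_value as runtime (non-Constant) inputs instead of rejecting them at graph-compile time. A minimal sketch of the graph shape this enables, mirroring how the updated layer tests below wire it; the function name and shapes are illustrative, not part of the commit:

    #include <openvino/openvino.hpp>

    // Sketch only: a Pad whose begin/end/value arrive at runtime, as the
    // updated PadLayerCPUTest builds with InputLayerType::PARAMETER.
    std::shared_ptr<ov::Model> make_runtime_pad_model() {
        auto data  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1, -1});
        auto begin = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{4});
        auto end   = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{4});
        auto value = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});  // scalar pad_value
        auto pad   = std::make_shared<ov::op::v1::Pad>(data, begin, end, value, ov::op::PadMode::CONSTANT);
        return std::make_shared<ov::Model>(ov::OutputVector{pad->output(0)},
                                           ov::ParameterVector{data, begin, end, value});
    }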
@@ -17,7 +17,6 @@
using namespace dnnl;
using namespace InferenceEngine;

#define THROW_ERROR IE_THROW() << "Pad layer with name '" << getName() << "' "

namespace ov {
namespace intel_cpu {
@@ -32,28 +31,40 @@ bool Pad::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, st
        }

        const auto pad_mode = pad->get_pad_mode();
        if (!one_of(pad_mode, ngraph::op::PadMode::CONSTANT, ngraph::op::PadMode::EDGE, ngraph::op::PadMode::REFLECT,
                    ngraph::op::PadMode::SYMMETRIC)) {
        if (!one_of(pad_mode,
                    ngraph::op::PadMode::CONSTANT,
                    ngraph::op::PadMode::EDGE,
                    ngraph::op::PadMode::REFLECT,
                    ngraph::op::PadMode::SYMMETRIC)) {
            errorMessage = "Has unsupported pad_mode: " + ngraph::as_string(pad_mode);
            return false;
        }

        if (op->get_input_node_shared_ptr(PADS_BEGIN_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
            op->get_input_node_shared_ptr(PADS_END_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static() ||
            (pad->get_input_size() == 4 && pad->get_pad_mode() == ngraph::op::PadMode::CONSTANT &&
             op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() != ov::op::v0::Constant::get_type_info_static())) {
            // TODO: Support pads_begin, pads_end, pad_value inputs for dynamic shapes.
            errorMessage = "Only Constant 'pads_begin', 'pads_end' and 'pad_value' inputs are supported.";
            return false;
        }
        auto checkPadConstVal = [&](size_t id) {
            CoordinateDiff padParams;
            std::string padStr = "";
            if (id == PADS_BEGIN_ID) {
                padParams = pad->get_pads_begin();
                padStr = "pad_begin";
            } else if (id == PADS_END_ID) {
                padParams = pad->get_pads_end();
                padStr = "pad_end";
            }
            if (std::any_of(padParams.begin(), padParams.end(), [](ptrdiff_t x) {
                    return x < 0;
                })) {
                errorMessage = "Doesn't support " + padStr + " with negative values";
                return false;
            }
            return true;
        };

        const auto pb = pad->get_pads_begin();
        const auto pe = pad->get_pads_end();
        if (std::any_of(pb.begin(), pb.end(), [](ptrdiff_t x) { return x < 0; }) ||
            std::any_of(pe.begin(), pe.end(), [](ptrdiff_t x) { return x < 0; })) {
            errorMessage = "Doesn't support 'pads_begin' or 'pads_end' with negative values";
        if (op->get_input_node_shared_ptr(PADS_BEGIN_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()
            && !checkPadConstVal(PADS_BEGIN_ID))
            return false;
        if (op->get_input_node_shared_ptr(PADS_END_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()
            && !checkPadConstVal(PADS_END_ID))
            return false;
        }
    } catch (...) {
        return false;
    }
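The support check no longer demands Constant pads inputs; the negative-value screen now runs only when the corresponding input actually is a Constant, since runtime pads cannot be inspected until execution (they are re-checked in paramsInitialization further down). A standalone sketch of that policy, with an illustrative name and signature that are not the node's API:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Sketch: pads known at graph-compile time are screened for negatives
    // here; a nullptr stands in for "runtime input, check deferred".
    bool const_pads_supported(const std::vector<std::ptrdiff_t>* constPads) {
        if (constPads == nullptr)
            return true;  // value unknown until execution; validated at runtime
        return std::none_of(constPads->begin(), constPads->end(),
                            [](std::ptrdiff_t p) { return p < 0; });
    }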
@@ -61,49 +72,60 @@ bool Pad::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, st
}

Pad::Pad(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr context)
        : Node(op, context, NgraphShapeInferFactory(op, PortMask(PADS_BEGIN_ID, PADS_END_ID))) {
    : Node(op, context, NgraphShapeInferFactory(op, PortMask(PADS_BEGIN_ID, PADS_END_ID))) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
        IE_THROW(NotImplemented) << errorMessage;
    }

    errorPrefix = NameFromType(getType()) + " node with name '" + getName() + "' ";
    if (inputShapes.size() != 3 && inputShapes.size() != 4)
        THROW_ERROR << " has incorrect number of input edges";
        IE_THROW() << errorPrefix << " has incorrect number of input edges";
    if (outputShapes.size() != 1)
        THROW_ERROR << "Incorrect number of output edges";
        IE_THROW() << errorPrefix << "Incorrect number of output edges";

    const size_t srcDimsRank = inputShapes[DATA_ID].getRank();
    const size_t dstDimsRank = outputShapes[DATA_ID].getRank();
    if (srcDimsRank != dstDimsRank)
        THROW_ERROR << "has incorrect number of input/output dimensions!";
        IE_THROW() << errorPrefix << "has incorrect number of input/output dimensions!";

    auto pad = ov::as_type_ptr<const ngraph::opset1::Pad>(op);
    if (!pad) {
        THROW_ERROR << "couldn't be casted to op of opset1";
        IE_THROW() << errorPrefix << "couldn't be casted to op of opset1";
    }

    if (op->get_input_node_shared_ptr(PADS_BEGIN_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static() &&
        op->get_input_node_shared_ptr(PADS_END_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
        const auto pb = pad->get_pads_begin();
        const auto pe = pad->get_pads_end();
    shapeHasDataDependency = !ov::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(PADS_BEGIN_ID)) ||
                             !ov::is_type<ov::op::v0::Constant>(op->get_input_node_shared_ptr(PADS_END_ID));

        for (size_t i = 0; i < pb.size(); i++)
            attrs.padsBegin.push_back(static_cast<unsigned int>(pb[i]));
        for (size_t i = 0; i < pe.size(); i++)
            attrs.padsEnd.push_back(static_cast<unsigned int>(pe[i]));
    auto fillingInParameters = [&](std::vector<unsigned int>& parameter, const size_t type) {
        if (type < PADS_BEGIN_ID)
            return;

        if (attrs.padsBegin.size() != srcDimsRank || attrs.padsEnd.size() != srcDimsRank)
            THROW_ERROR << "has incorrect number of input/output dimensions!";
    }
        const auto constNode = ov::as_type_ptr<const ngraph::opset1::Constant>(op->get_input_node_shared_ptr(type));
        if (constNode) {
            auto pad_data = constNode->cast_vector<uint32_t>();
            for (const auto& value : pad_data) {
                parameter.push_back(value);
            }
            if (parameter.size() != srcDimsRank)
                IE_THROW() << errorPrefix << "has incorrect number of input/output dimensions!";
        }
    };

    fillingInParameters(attrs.padsBegin, PADS_BEGIN_ID);
    fillingInParameters(attrs.padsEnd, PADS_END_ID);

    const auto pad_mode = pad->get_pad_mode();
    isPadValueSpecified = pad->get_input_size() == 4;
    if (pad_mode == ngraph::op::PadMode::CONSTANT) {
        attrs.padMode = CONSTANT;
        if (isPadValueSpecified && op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) {
        if (isPadValueSpecified && op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() ==
                                       ov::op::v0::Constant::get_type_info_static()) {
            if (!ngraph::is_scalar(pad->get_input_shape(PAD_VALUE_ID)))
                THROW_ERROR << "has non scalar 'pad_value' input";
            attrs.padValue = ov::as_type_ptr<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID))->cast_vector<float>()[0];
                IE_THROW() << errorPrefix << "has non scalar 'pad_value' input";
            attrs.padValue =
                ov::as_type_ptr<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID))
                    ->cast_vector<float>()[0];
            attrs.constPadValue = true;
        }
    } else if (pad_mode == ngraph::op::PadMode::EDGE) {
        attrs.padMode = EDGE;
@@ -112,7 +134,7 @@ Pad::Pad(const std::shared_ptr<ngraph::Node>& op, const GraphContext::CPtr conte
    } else if (pad_mode == ngraph::op::PadMode::SYMMETRIC) {
        attrs.padMode = SYMMETRIC;
    } else {
        THROW_ERROR << "has unsupported pad_mode: " + ngraph::as_string(pad_mode);
        IE_THROW() << errorPrefix << "has unsupported pad_mode: " + ngraph::as_string(pad_mode);
    }
}

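The constructor now records whether either pads input is non-Constant in shapeHasDataDependency; that flag is what forces shape inference and parameter preparation to re-run every iteration (see needShapeInfer/needPrepareParams in the next hunk). A condensed sketch of the pattern, with a hypothetical struct name and a stand-in for Node's real shape check:

    // Condensed sketch: when pads arrive at runtime, the output shape depends
    // on input *values*, so re-run shape inference even for unchanged shapes.
    struct PadNodeSketch {
        bool shapeHasDataDependency = false;  // set when a pads input is not Constant
        bool inputShapesModified() const { return false; }  // stand-in for Node's check
        bool needShapeInfer() const { return inputShapesModified() || shapeHasDataDependency; }
        bool needPrepareParams() const { return inputShapesModified() || shapeHasDataDependency; }
    };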
@@ -157,74 +179,39 @@ void Pad::initSupportedPrimitiveDescriptors() {
    auto canUseBlocked = [&](const size_t blockSize) {
        const auto& srcDims = inputDataShape.getDims();
        return srcDims[1] != Shape::UNDEFINED_DIM && srcDims[1] % blockSize == 0 &&
               ((attrs.padMode == CONSTANT && attrs.padsBegin[1] % blockSize == 0 && attrs.padsEnd[1] % blockSize == 0) ||
                (attrs.padMode != CONSTANT && attrs.padsBegin[1] == 0 && attrs.padsEnd[1] == 0));
               ((attrs.padMode == CONSTANT && attrs.padsBegin[1] % blockSize == 0 &&
                 attrs.padsEnd[1] % blockSize == 0) ||
                (attrs.padMode != CONSTANT && attrs.padsBegin[1] == 0 && attrs.padsEnd[1] == 0));
    };

    if (numOfDims == 4 || numOfDims == 5) {
        if (canUseBlocked(8))
            pushSupportedPrimitiveDescriptor(LayoutType::nCsp8c);
        if (canUseBlocked(16))
            pushSupportedPrimitiveDescriptor(LayoutType::nCsp16c);
        if (!shapeHasDataDependency) {
            if (canUseBlocked(8))
                pushSupportedPrimitiveDescriptor(LayoutType::nCsp8c);
            if (canUseBlocked(16))
                pushSupportedPrimitiveDescriptor(LayoutType::nCsp16c);
        }
    }
}

bool Pad::needShapeInfer() const {
    return Node::inputShapesModified() || shapeHasDataDependency;
}

bool Pad::needPrepareParams() const {
    return Node::inputShapesModified() || shapeHasDataDependency;
}

void Pad::createPrimitive() {
    auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
    auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
    if (!dstMemPtr || !dstMemPtr->isAllocated())
        THROW_ERROR << "has not allocated source memory.";
    if (!srcMemPtr || !srcMemPtr->isAllocated())
        THROW_ERROR << "has not allocated destination memory.";
    if (getSelectedPrimitiveDescriptor() == nullptr)
        THROW_ERROR << "has unidentified preferable primitive descriptor";

    // pads are constant, so we can calculate new collapsing pads for first target dimensions and use it for the next dimensions
    // to avoid permanent identical pad calculations
    const size_t blockSize = srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp16c) ? 16 :
                             (srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp8c) ? 8 : 1);
    if (blockSize > 1) {
        attrs.padsBegin[1] /= blockSize;
        attrs.padsEnd[1] /= blockSize;
        attrs.padsBegin.push_back(0);
        attrs.padsEnd.push_back(0);
    } else {
        auto order = getParentEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>()->getOrder();
        std::vector<unsigned int> newPadsBegin(attrs.padsBegin.size(), 0), newPadsEnd(attrs.padsEnd.size(), 0);
        for (size_t i = 0; i < attrs.padsBegin.size(); ++i) {
            newPadsBegin[i] = attrs.padsBegin[order[i]];
            newPadsEnd[i] = attrs.padsEnd[order[i]];
        }
        attrs.padsBegin = newPadsBegin;
        attrs.padsEnd = newPadsEnd;
    }

    // collapse dimensions
    attrs.beginPadIdx = 0;
    attrs.endPadIdx = attrs.padsBegin.size() - 1;

    for (int i = 0; i < attrs.padsBegin.size(); ++i) {
        if (attrs.padsBegin[i] != 0 || attrs.padsEnd[i] != 0) {
            attrs.beginPadIdx = i - 1;
            break;
    if (srcMemory.empty()) {
        for (int i = 0; i < getOriginalInputsNumber(); i++) {
            srcMemory.push_back(getParentEdgeAt(i)->getMemoryPtr());
        }
    }

    for (int i = attrs.padsBegin.size() - 1; i >= 0; --i) {
        if (attrs.padsBegin[i] != 0 || attrs.padsEnd[i] != 0) {
            attrs.endPadIdx = i;
            break;
        }
    if (dstMemory.empty()) {
        dstMemory.push_back(getChildEdgeAt(0)->getMemoryPtr());
    }

    if (attrs.beginPadIdx > 0) {
        attrs.padsBegin.erase(attrs.padsBegin.begin() + 1, attrs.padsBegin.begin() + attrs.beginPadIdx + 1);
        attrs.padsEnd.erase(attrs.padsEnd.begin() + 1, attrs.padsEnd.begin() + attrs.beginPadIdx + 1);
    }

    attrs.prc = srcMemPtr->getDesc().getPrecision();

    if (inputShapesDefined()) {
    if (inputShapesDefined() && isExecutable() && !shapeHasDataDependency) {
        prepareParams();
        updateLastInputDims();
    }
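Blocked channel layouts (nCsp8c/nCsp16c) are now offered only when the pads are known up front: canUseBlocked needs padsBegin[1] and padsEnd[1], which do not exist yet if the pads arrive at runtime. A standalone restatement of that eligibility test with hypothetical numbers:

    #include <cstddef>

    // Blocked layouts stay legal only when the channel pads respect the block.
    bool can_use_blocked(std::size_t channels, std::size_t padBeginC, std::size_t padEndC,
                         bool constantMode, std::size_t blockSize) {
        if (channels % blockSize != 0)
            return false;
        return constantMode ? (padBeginC % blockSize == 0 && padEndC % blockSize == 0)
                            : (padBeginC == 0 && padEndC == 0);
    }
    // can_use_blocked(16, 8, 0, true, 8)  -> true  (pads divide the block size)
    // can_use_blocked(16, 8, 0, true, 16) -> false (channel pad 8 % 16 != 0)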
@@ -235,26 +222,120 @@ bool Pad::isExecutable() const {
}

void Pad::prepareParams() {
    execPtr = std::make_shared<PadExecutor>(attrs,
        getParentEdgeAt(0)->getMemoryPtr()->GetDescWithType<BlockedMemoryDesc>()->getBlockDims(),
        getChildEdgeAt(0)->getMemoryPtr()->GetDescWithType<BlockedMemoryDesc>()->getBlockDims());
    updateLastInputDims();
    execPtr = std::make_shared<PadExecutor>(
        attrs,
        srcMemory,
        dstMemory,
        errorPrefix);
}

Pad::PadExecutor::PadExecutor(const PadAttrs& attrs,
                              const VectorDims& srcDims,
                              const VectorDims& dstDims) {
    params.attrs = attrs;
    params.dstDims = dstDims;
                              const std::vector<MemoryCPtr>& srcMemory,
                              const std::vector<MemoryCPtr>& dstMemory,
                              const std::string& errorPrefix)
    : errorPrefix(errorPrefix) {
    paramsInitialization(attrs, srcMemory, dstMemory);
    workPartition();
}

    zeroInputDimsCase = std::any_of(srcDims.begin(), srcDims.end(), [](size_t dim) { return dim == 0; } ) &&
                        std::none_of(dstDims.begin(), dstDims.end(), [](size_t dim) { return dim == 0; } );
void Pad::PadExecutor::paramsInitialization(const PadAttrs& attrs,
                                            const std::vector<MemoryCPtr>& srcMemory,
                                            const std::vector<MemoryCPtr>& dstMemory) {
    params.attrs = attrs;
    auto& srcMemPtr = srcMemory[DATA_ID];
    auto& dstMemPtr = dstMemory[DATA_ID];
    if (!dstMemPtr || !dstMemPtr->isAllocated())
        IE_THROW() << errorPrefix << "has not allocated source memory.";
    if (!srcMemPtr || !srcMemPtr->isAllocated())
        IE_THROW() << errorPrefix << "has not allocated destination memory.";
    const auto srcBlockMemDesc = srcMemPtr->GetDescWithType<BlockedMemoryDesc>();
    const auto dstBlockMemDesc = dstMemPtr->GetDescWithType<BlockedMemoryDesc>();
    const auto& srcDims = srcBlockMemDesc->getBlockDims();
    const auto& dstDims = dstBlockMemDesc->getBlockDims();

    params.attrs.prc = srcMemPtr->getDesc().getPrecision();
    params.srcDims = srcDims;
    params.dstDims = dstDims;
    params.dataSize = params.attrs.prc.size();

    auto fillingInParameters =
        [&](std::vector<unsigned int>& parameter, const size_t type, const size_t size, const int value) {
            const int* ptr = reinterpret_cast<const int32_t*>(srcMemory[type]->GetPtr());
            parameter.resize(size);
            for (size_t i = 0; i < size; i++) {
                if (ptr[i] < 0)
                    IE_THROW() << errorPrefix << "pad begin/end must have positive value";
                parameter[i] = static_cast<unsigned int>(ptr[i]);
            }
        };
    // if pad begin/end/value dynamic
    if (params.attrs.padsBegin.empty())
        fillingInParameters(params.attrs.padsBegin, PADS_BEGIN_ID, srcDims.size(), 0);
    if (params.attrs.padsEnd.empty())
        fillingInParameters(params.attrs.padsEnd, PADS_END_ID, srcDims.size(), 0);
    if (!params.attrs.constPadValue)
        params.attrs.padValue = reinterpret_cast<const float*>(srcMemory[PAD_VALUE_ID]->GetPtr())[0];
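When the pads were not baked in at construction time, the vectors above arrive empty, and paramsInitialization fills them from the i32 input blobs on each prepareParams call. A self-contained sketch of that read, assuming a plain int32 buffer of length equal to the tensor rank:

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Sketch of the runtime read done by fillingInParameters: negatives are
    // rejected here because they could not be screened at graph-compile time.
    std::vector<unsigned int> read_runtime_pads(const int32_t* blob, std::size_t rank) {
        std::vector<unsigned int> pads(rank);
        for (std::size_t i = 0; i < rank; ++i) {
            if (blob[i] < 0)
                throw std::runtime_error("pad begin/end must have positive value");
            pads[i] = static_cast<unsigned int>(blob[i]);
        }
        return pads;
    }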
    // pads are constant, so we can calculate new collapsing pads for first target dimensions and use it for the next
    // dimensions to avoid permanent identical pad calculations
    const size_t blockSize = srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp16c)
                                 ? 16
                                 : (srcMemPtr->getDesc().hasLayoutType(LayoutType::nCsp8c) ? 8 : 1);

    if (blockSize > 1) {
        params.attrs.padsBegin[1] /= blockSize;
        params.attrs.padsEnd[1] /= blockSize;
        params.attrs.padsBegin.push_back(0);
        params.attrs.padsEnd.push_back(0);
    } else {
        auto order = srcBlockMemDesc->getOrder();
        std::vector<unsigned int> newPadsBegin(params.attrs.padsBegin.size(), 0),
            newPadsEnd(params.attrs.padsEnd.size(), 0);
        for (size_t i = 0; i < params.attrs.padsBegin.size(); ++i) {
            newPadsBegin[i] = params.attrs.padsBegin[order[i]];
            newPadsEnd[i] = params.attrs.padsEnd[order[i]];
        }
        params.attrs.padsBegin = newPadsBegin;
        params.attrs.padsEnd = newPadsEnd;
    }
    params.attrs.beginPadIdx = 0;
    params.attrs.endPadIdx = params.attrs.padsBegin.size() - 1;

    for (int i = 0; i < params.attrs.padsBegin.size(); ++i) {
        if (params.attrs.padsBegin[i] != 0 || params.attrs.padsEnd[i] != 0) {
            params.attrs.beginPadIdx = i - 1;
            break;
        }
    }

    for (int i = params.attrs.padsBegin.size() - 1; i >= 0; --i) {
        if (params.attrs.padsBegin[i] != 0 || params.attrs.padsEnd[i] != 0) {
            params.attrs.endPadIdx = i;
            break;
        }
    }

    if (params.attrs.beginPadIdx > 0) {
        params.attrs.padsBegin.erase(params.attrs.padsBegin.begin() + 1,
                                     params.attrs.padsBegin.begin() + params.attrs.beginPadIdx + 1);
        params.attrs.padsEnd.erase(params.attrs.padsEnd.begin() + 1,
                                   params.attrs.padsEnd.begin() + params.attrs.beginPadIdx + 1);
    }
}

void Pad::PadExecutor::workPartition() {
    zeroInputDimsCase = std::any_of(params.srcDims.begin(),
                                    params.srcDims.end(),
                                    [](size_t dim) {
                                        return dim == 0;
                                    }) &&
                        std::none_of(params.dstDims.begin(), params.dstDims.end(), [](size_t dim) {
                            return dim == 0;
                        });
    if (zeroInputDimsCase) {
        return;
    }

    params.srcDims = srcDims;
    params.dataSize = attrs.prc.size();

    size_t nDims = params.srcDims.size();
    params.srcStrides.resize(nDims, 1);
    params.dstStrides.resize(nDims, 1);
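The two index scans above collapse dimensions that carry no padding, so the inner copy loops run over fewer, larger blocks. A self-contained sketch of the same computation, with a worked example in the comment:

    #include <vector>

    // Find the first and last dimensions that actually carry padding.
    // Example: padsBegin = {0,0,2,1}, padsEnd = {0,0,0,3}
    //          -> beginPadIdx = 1, endPadIdx = 3.
    void collapse_bounds(const std::vector<unsigned>& padsBegin,
                         const std::vector<unsigned>& padsEnd,
                         int& beginPadIdx, int& endPadIdx) {
        beginPadIdx = 0;
        endPadIdx = static_cast<int>(padsBegin.size()) - 1;
        for (int i = 0; i < static_cast<int>(padsBegin.size()); ++i) {
            if (padsBegin[i] != 0 || padsEnd[i] != 0) { beginPadIdx = i - 1; break; }
        }
        for (int i = static_cast<int>(padsBegin.size()) - 1; i >= 0; --i) {
            if (padsBegin[i] != 0 || padsEnd[i] != 0) { endPadIdx = i; break; }
        }
    }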
@@ -262,7 +343,6 @@ Pad::PadExecutor::PadExecutor(const PadAttrs& attrs,
        params.srcStrides[i] = params.srcStrides[i + 1] * params.srcDims[i + 1];
        params.dstStrides[i] = params.dstStrides[i + 1] * params.dstDims[i + 1];
    }

    params.lastDstDim = params.dstStrides[std::max(params.attrs.endPadIdx - 1, 0)];
    params.nDimsForWork = params.attrs.endPadIdx - std::max(params.attrs.beginPadIdx, 0);
    params.nThreads = params.nDimsForWork > 0 ? 0 : 1;
@@ -307,25 +387,25 @@ void Pad::PadExecutor::exec(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {
        padConstant(srcMemPtr, dstMemPtr);
    } else {
        switch (params.attrs.padMode) {
            case CONSTANT:
                padConstant(srcMemPtr, dstMemPtr);
                break;
            case EDGE:
                padEdge(srcMemPtr, dstMemPtr);
                break;
            case REFLECT:
                padReflectOrSymmetric(srcMemPtr, dstMemPtr);
                break;
            case SYMMETRIC:
                padReflectOrSymmetric(srcMemPtr, dstMemPtr, true);
                break;
        case CONSTANT:
            padConstant(srcMemPtr, dstMemPtr);
            break;
        case EDGE:
            padEdge(srcMemPtr, dstMemPtr);
            break;
        case REFLECT:
            padReflectOrSymmetric(srcMemPtr, dstMemPtr);
            break;
        case SYMMETRIC:
            padReflectOrSymmetric(srcMemPtr, dstMemPtr, true);
            break;
        }
    }
}

void Pad::execute(dnnl::stream strm) {
    if (!execPtr)
        THROW_ERROR << "has not compiled executor.";
        IE_THROW() << errorPrefix << "has not compiled executor.";

    execPtr->exec(getParentEdgeAt(0)->getMemoryPtr(), getChildEdgeAt(0)->getMemoryPtr());
}
@@ -358,8 +438,11 @@ void Pad::PadExecutor::padConstant(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {
        return;
    }

    PadContext ctx { this, srcMemPtr, dstMemPtr };
    OV_SWITCH(intel_cpu, PadConstantEmitter, ctx, params.attrs.prc,
    PadContext ctx{this, srcMemPtr, dstMemPtr};
    OV_SWITCH(intel_cpu,
              PadConstantEmitter,
              ctx,
              params.attrs.prc,
              OV_CASE(InferenceEngine::Precision::FP32, float),
              OV_CASE(InferenceEngine::Precision::I32, int32_t),
              OV_CASE(InferenceEngine::Precision::BF16, bfloat16_t),
@@ -367,7 +450,7 @@ void Pad::PadExecutor::padConstant(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {
              OV_CASE(InferenceEngine::Precision::U8, uint8_t));
}

template<typename T>
template <typename T>
void Pad::PadExecutor::padConstantCommon(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {
    T* dstData = reinterpret_cast<T*>(dstMemPtr->GetPtr());
    const T value = static_cast<T>(params.attrs.padValue);
@@ -485,8 +568,11 @@ void Pad::PadExecutor::padEdge(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {
        for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
            size_t srcIdx = 0;
            for (size_t idx = 0; idx < params.nDimsForWork; ++idx) {
                size_t shift = (indexes[idx] < params.attrs.padsBegin[idx]) ? 0 :
                    ((indexes[idx] >= params.srcODims[idx]) ? (params.srcDims[idx] - 1) : (indexes[idx] - params.attrs.padsBegin[idx]));
                size_t shift =
                    (indexes[idx] < params.attrs.padsBegin[idx])
                        ? 0
                        : ((indexes[idx] >= params.srcODims[idx]) ? (params.srcDims[idx] - 1)
                                                                  : (indexes[idx] - params.attrs.padsBegin[idx]));
                srcIdx += shift * params.srcStrides[idx];
            }
            srcIdx *= params.dataSize;
@@ -498,7 +584,8 @@ void Pad::PadExecutor::padEdge(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr) {

            for (size_t i = 0; i < params.attrs.padsEnd[params.nDimsForWork]; ++i)
                cpu_memcpy(&dstData[dstIdx + beginShift + copySize + i * params.shift],
                           &srcData[srcIdx + (params.srcDims[params.nDimsForWork] - 1) * params.shift], params.shift);
                           &srcData[srcIdx + (params.srcDims[params.nDimsForWork] - 1) * params.shift],
                           params.shift);

            parallel_step(params.nDimsForWork, params.dstDims, indexes);
        }
@@ -523,24 +610,31 @@ void Pad::PadExecutor::padReflectOrSymmetric(MemoryPtr& srcMemPtr, MemoryPtr& ds
        for (size_t iwork = start; iwork < end; ++iwork, dstIdx += params.lastDstDim) {
            size_t srcIdx = 0;
            for (size_t i = 0; i < params.nDimsForWork; ++i) {
                size_t idx = (indexes[i] < params.attrs.padsBegin[i]) ? (params.attrs.padsBegin[i] - indexes[i] - shift) :
                    ((indexes[i] >= params.srcODims[i]) ? (params.srcDimsForReflectOrSymmetric[i] - indexes[i]) :
                     (indexes[i] - params.attrs.padsBegin[i]));
                size_t idx =
                    (indexes[i] < params.attrs.padsBegin[i])
                        ? (params.attrs.padsBegin[i] - indexes[i] - shift)
                        : ((indexes[i] >= params.srcODims[i]) ? (params.srcDimsForReflectOrSymmetric[i] - indexes[i])
                                                              : (indexes[i] - params.attrs.padsBegin[i]));
                srcIdx += idx * params.srcStrides[i];
            }
            srcIdx *= params.dataSize;

            for (size_t i = 0; i < params.attrs.padsBegin[params.nDimsForWork]; ++i)
                cpu_memcpy(&dstData[dstIdx + i * params.shift],
                           &srcData[srcIdx + (params.attrs.padsBegin[params.nDimsForWork] - shift - i) * params.shift], params.shift);
                           &srcData[srcIdx + (params.attrs.padsBegin[params.nDimsForWork] - shift - i) * params.shift],
                           params.shift);

            cpu_memcpy(&dstData[dstIdx + params.attrs.padsBegin[params.nDimsForWork] * params.shift], &srcData[srcIdx],
            cpu_memcpy(&dstData[dstIdx + params.attrs.padsBegin[params.nDimsForWork] * params.shift],
                       &srcData[srcIdx],
                       params.srcDims[params.nDimsForWork] * params.shift);

            size_t srcShift = (params.srcDimsForReflectOrSymmetric[params.nDimsForWork] - params.srcODims[params.nDimsForWork]) * params.shift;
            size_t srcShift =
                (params.srcDimsForReflectOrSymmetric[params.nDimsForWork] - params.srcODims[params.nDimsForWork]) *
                params.shift;
            for (size_t i = 0; i < params.attrs.padsEnd[params.nDimsForWork]; ++i)
                cpu_memcpy(&dstData[dstIdx + (params.srcODims[params.nDimsForWork] + i) * params.shift],
                           &srcData[srcIdx + srcShift - i * params.shift], params.shift);
                           &srcData[srcIdx + srcShift - i * params.shift],
                           params.shift);

            parallel_step(params.nDimsForWork, params.dstDims, indexes);
        }
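The ternary chain above maps out-of-range destination indexes back into the source by mirroring around the borders. A 1-D illustration of that arithmetic, assuming shift = 0 for REFLECT and 1 for SYMMETRIC and srcDimsForReflectOrSymmetric = srcDim + srcODim - 2 + shift (both set outside these hunks, so this reading is an inference from the formulas shown):

    #include <cstddef>

    // src = {a, b, c}, padBegin = 2, padEnd = 2
    //   REFLECT   (shift = 0): c b | a b c | b a
    //   SYMMETRIC (shift = 1): b a | a b c | c b
    std::size_t mirror_index(std::size_t dstIdx, std::size_t padBegin,
                             std::size_t srcDim, std::size_t shift) {
        const std::size_t srcODim = padBegin + srcDim;     // one past the copied body in dst
        if (dstIdx < padBegin)
            return padBegin - dstIdx - shift;              // left pad, mirrored at the border
        if (dstIdx >= srcODim)
            return srcDim + srcODim - 2 + shift - dstIdx;  // right pad, mirrored back inward
        return dstIdx - padBegin;                          // body, straight copy
    }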
@@ -556,6 +650,6 @@ bool Pad::created() const {
    return getType() == Type::Pad;
}

}   // namespace node
}   // namespace intel_cpu
}   // namespace ov
}   // namespace node
}   // namespace intel_cpu
}   // namespace ov

@@ -24,8 +24,9 @@ public:
    bool created() const override;

    void prepareParams() override;

    bool needShapeInfer() const override;
    bool isExecutable() const override;
    bool needPrepareParams() const override;

protected:
    void executeDynamicImpl(dnnl::stream strm) override;
@@ -46,10 +47,14 @@ private:
        int beginPadIdx = 0;
        int endPadIdx = 0;
        InferenceEngine::Precision prc;
        bool constPadValue = false;
    } attrs;

    struct PadExecutor {
        PadExecutor(const PadAttrs& params, const VectorDims& srcDims, const VectorDims& dstDims);
        PadExecutor(const PadAttrs& attrs,
                    const std::vector<MemoryCPtr>& srcMemory,
                    const std::vector<MemoryCPtr>& dstMemory,
                    const std::string& errorPrefix);
        void exec(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr);
        ~PadExecutor() = default;

@@ -59,7 +64,10 @@ private:
        void padConstantZero(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr);
        void padEdge(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr);
        void padReflectOrSymmetric(MemoryPtr& srcMemPtr, MemoryPtr& dstMemPtr, const bool isSymmetric = false);

        void paramsInitialization(const PadAttrs& attrs,
                                  const std::vector<MemoryCPtr>& srcMemory,
                                  const std::vector<MemoryCPtr>& dstMemory);
        void workPartition();
        inline void getDstIdx(const VectorDims& indexes, size_t& dstIdx) const;

        struct PadContext {
@@ -93,6 +101,7 @@ private:
            size_t dataSize = 1lu;
            PadMode padMode;
        } params;
        const std::string errorPrefix;
    };

    static constexpr size_t DATA_ID = 0lu;
@@ -104,6 +113,11 @@ private:

    using executorPtr = std::shared_ptr<PadExecutor>;
    executorPtr execPtr = nullptr;
    std::vector<MemoryCPtr> srcMemory;
    std::vector<MemoryCPtr> dstMemory;
    std::string errorPrefix;
    bool shapeHasDataDependency = false;
    bool constPadValue = false;
};

}   // namespace node

@@ -5,6 +5,7 @@
#include <shared_test_classes/single_layer/pad.hpp>
#include "test_utils/cpu_test_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include <common_test_utils/ov_tensor_utils.hpp>

using namespace InferenceEngine;
using namespace CPUTestUtils;
@@ -15,6 +16,7 @@ namespace CPULayerTestsDefinitions {

using PadLayerCPUTestParamSet = std::tuple<
        InputShape,                                     // Input shape
        ngraph::helpers::InputLayerType,                // Secondary input types
        ElementType,                                    // Input element type
        std::vector<int64_t>,                           // padsBegin
        std::vector<int64_t>,                           // padsEnd
@@ -28,12 +30,13 @@ class PadLayerCPUTest : public testing::WithParamInterface<PadLayerCPUTestParamS
public:
    static std::string getTestCaseName(testing::TestParamInfo<PadLayerCPUTestParamSet> obj) {
        InputShape shapes;
        ngraph::helpers::InputLayerType secondaryInputType;
        ElementType elementType;
        std::vector<int64_t> padsBegin, padsEnd;
        ngraph::helpers::PadMode padMode;
        float argPadValue;
        CPUSpecificParams cpuParams;
        std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = obj.param;
        std::tie(shapes, secondaryInputType, elementType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = obj.param;

        std::ostringstream results;
        results << "IS=" << CommonTestUtils::partialShape2str({shapes.first}) << "_";
@@ -41,6 +44,7 @@ public:
        for (const auto& item : shapes.second) {
            results << CommonTestUtils::vec2str(item) << "_";
        }
        results << "secondaryInputType=" << secondaryInputType << "_";
        results << "Prc=" << elementType << "_";
        results << "padsBegin=" << CommonTestUtils::vec2str(padsBegin) << "_";
        results << "padsEnd=" << CommonTestUtils::vec2str(padsEnd) << "_";
@@ -54,23 +58,66 @@ public:
    }

protected:
    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
        std::vector<void*> inputValues = {padsBegin.data(), padsEnd.data(), &padValue};

        inputs.clear();
        const auto& funcInputs = function->inputs();
        for (int i = 0; i < funcInputs.size(); ++i) {
            const auto& funcInput = funcInputs[i];
            ov::Tensor tensor;
            if (i == 0) {
                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 10, 1, 1);
            } else {
                if (funcInput.get_node()->get_friendly_name() == "pad_value")
                    tensor = ov::Tensor{funcInput.get_element_type(), ov::Shape{}, &padValue};
                else
                    tensor = ov::Tensor{funcInput.get_element_type(), targetInputStaticShapes[i], inputValues[i-1]};
            }
            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
        }
    }
    void SetUp() override {
        InputShape shapes;
        std::vector<int64_t> padsBegin, padsEnd;
        ngraph::helpers::InputLayerType secondaryInputType;
        ngraph::helpers::PadMode padMode;
        float argPadValue;
        ov::element::Type dataType;
        CPUSpecificParams cpuParams;
        std::tie(shapes, inType, padsBegin, padsEnd, argPadValue, padMode, cpuParams) = this->GetParam();
        std::tie(shapes, secondaryInputType, dataType, padsBegin, padsEnd, padValue, padMode, cpuParams) = this->GetParam();

        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
        selectedType = makeSelectedTypeStr("ref", inType);
        selectedType = makeSelectedTypeStr("ref", dataType);
        targetDevice = CommonTestUtils::DEVICE_CPU;
        init_input_shapes({shapes});
        for (auto& targetShapes : targetStaticShapes) {
            targetShapes.push_back({padsBegin.size()});
            targetShapes.push_back({padsEnd.size()});
            targetShapes.push_back({});
        }
        auto params = ngraph::builder::makeDynamicParams(dataType, inputDynamicShapes);
        std::shared_ptr<ngraph::Node> pad;
        if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
            ov::Shape inShape = {padsBegin.size()};

        auto params = ngraph::builder::makeDynamicParams(inType, inputDynamicShapes);
        auto pad = ngraph::builder::makePad(params[0], padsBegin, padsEnd, argPadValue, padMode);
            auto beginNode = std::make_shared<ngraph::opset1::Parameter>(ov::element::i64, inShape);
            auto endNode = std::make_shared<ngraph::opset1::Parameter>(ov::element::i64, inShape);
            std::shared_ptr<ngraph::opset1::Parameter> valueNode = nullptr;
            params.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(beginNode));
            params.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(endNode));
            if (padMode == ngraph::helpers::PadMode::CONSTANT) {
                valueNode = std::make_shared<ngraph::opset1::Parameter>(dataType, ov::Shape{});
                params.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(valueNode));
                params.back()->set_friendly_name("pad_value");
            }
            pad = ngraph::builder::makePad(params[0], beginNode, endNode, valueNode, padMode);
        } else {
            pad = ngraph::builder::makePad(params[0], padsBegin, padsEnd, padValue, padMode);
        }
        function = makeNgraphFunction(inType, params, pad, "Pad");
    }
    std::vector<int64_t> padsBegin;
    std::vector<int64_t> padsEnd;
    float padValue;
};

TEST_P(PadLayerCPUTest, CompareWithRefs) {
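generate_inputs feeds the runtime pads by wrapping the fixture's own padsBegin/padsEnd/padValue buffers in ov::Tensor objects without copying. A minimal hedged illustration of that constructor; the values are made up and the buffers must outlive the tensors:

    #include <openvino/runtime/tensor.hpp>
    #include <cstdint>
    #include <vector>

    // Sketch of the zero-copy wrapping used above: ov::Tensor aliasing
    // caller-owned memory.
    void wrap_pads_example() {
        std::vector<int64_t> padsBegin{0, 0, 1, 2};
        float padValue = 2.5f;
        ov::Tensor begin{ov::element::i64, ov::Shape{padsBegin.size()}, padsBegin.data()};
        ov::Tensor value{ov::element::f32, ov::Shape{}, &padValue};  // scalar, like "pad_value"
        (void)begin;
        (void)value;
    }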
@@ -99,6 +146,15 @@ const std::vector<ElementType> inputPrecisions = {
        ElementType::i8
};

const std::vector<ngraph::helpers::InputLayerType> inputLayerTypes = {
        ngraph::helpers::InputLayerType::CONSTANT,
        ngraph::helpers::InputLayerType::PARAMETER
};

const std::vector<ngraph::helpers::InputLayerType> inputLayerTypesBlocked = {
        ngraph::helpers::InputLayerType::CONSTANT,
};

const std::vector<float> argPadValue = {0.f, 2.5f, -1.f};

const std::vector<ngraph::helpers::PadMode> padMode = {
@@ -137,6 +193,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DConstBlocked_Smoke),
                ::testing::ValuesIn(padsEnd4DConstBlocked_Smoke),
@@ -151,6 +208,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Smoke),
                ::testing::ValuesIn(padsEnd4D_Smoke),
@@ -165,6 +223,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
@@ -179,6 +238,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
@@ -193,6 +253,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DConstBlocked_Full),
                ::testing::ValuesIn(padsEnd4DConstBlocked_Full),
@@ -207,6 +268,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Full),
                ::testing::ValuesIn(padsEnd4D_Full),
@@ -221,6 +283,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Full),
                ::testing::ValuesIn(padsEnd4DBlocked_Full),
@@ -235,6 +298,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 10, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Full),
                ::testing::ValuesIn(padsEnd4DBlocked_Full),
@@ -269,6 +333,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic4D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Smoke),
                ::testing::ValuesIn(padsEnd4D_Smoke),
@@ -283,6 +348,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic4D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DConstBlocked_Smoke),
                ::testing::ValuesIn(padsEnd4DConstBlocked_Smoke),
@@ -297,6 +363,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic4D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Smoke),
                ::testing::ValuesIn(padsEnd4D_Smoke),
@@ -311,6 +378,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic4D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Smoke),
                ::testing::ValuesIn(padsEnd4DBlocked_Smoke),
@@ -325,6 +393,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic4D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Full),
                ::testing::ValuesIn(padsEnd4D_Full),
@@ -339,6 +408,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic4D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DConstBlocked_Full),
                ::testing::ValuesIn(padsEnd4DConstBlocked_Full),
@@ -353,6 +423,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic4D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4D_Full),
                ::testing::ValuesIn(padsEnd4D_Full),
@@ -367,6 +438,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic4D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin4DBlocked_Full),
                ::testing::ValuesIn(padsEnd4DBlocked_Full),
@@ -408,6 +480,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DConstBlocked_Smoke),
                ::testing::ValuesIn(padsEnd5DConstBlocked_Smoke),
@@ -422,6 +495,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Smoke),
                ::testing::ValuesIn(padsEnd5D_Smoke),
@@ -436,6 +510,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DBlocked_Smoke),
                ::testing::ValuesIn(padsEnd5DBlocked_Smoke),
@@ -450,6 +525,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Smoke),
                ::testing::ValuesIn(padsEnd5D_Smoke),
@@ -464,6 +540,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DConstBlocked_Full),
                ::testing::ValuesIn(padsEnd5DConstBlocked_Full),
@@ -478,6 +555,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Full),
                ::testing::ValuesIn(padsEnd5D_Full),
@@ -492,6 +570,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DBlocked_Full),
                ::testing::ValuesIn(padsEnd5DBlocked_Full),
@@ -506,6 +585,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(static_shapes_to_test_representation({{3, 16, 5, 5, 5}})),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Full),
                ::testing::ValuesIn(padsEnd5D_Full),
@@ -540,6 +620,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic5D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Smoke),
                ::testing::ValuesIn(padsEnd5D_Smoke),
@@ -554,6 +635,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic5D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DConstBlocked_Smoke),
                ::testing::ValuesIn(padsEnd5DConstBlocked_Smoke),
@@ -568,6 +650,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic5D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Smoke),
                ::testing::ValuesIn(padsEnd5D_Smoke),
@@ -582,6 +665,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic5D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DBlocked_Smoke),
                ::testing::ValuesIn(padsEnd5DBlocked_Smoke),
@@ -596,6 +680,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic5D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Full),
                ::testing::ValuesIn(padsEnd5D_Full),
@@ -610,6 +695,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic5D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DConstBlocked_Full),
                ::testing::ValuesIn(padsEnd5DConstBlocked_Full),
@@ -624,6 +710,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::ValuesIn(inputShapesDynamic5D),
                ::testing::ValuesIn(inputLayerTypes),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5D_Full),
                ::testing::ValuesIn(padsEnd5D_Full),
@@ -638,6 +725,7 @@ INSTANTIATE_TEST_SUITE_P(
        PadLayerCPUTest,
        ::testing::Combine(
                ::testing::Values(inputShapesDynamic5D[1]),
                ::testing::ValuesIn(inputLayerTypesBlocked),
                ::testing::ValuesIn(inputPrecisions),
                ::testing::ValuesIn(padsBegin5DBlocked_Full),
                ::testing::ValuesIn(padsEnd5DBlocked_Full),

@@ -506,6 +506,12 @@ std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
                                      float argPadValue,
                                      ngraph::helpers::PadMode padMode);

std::shared_ptr<ov::Node> makePad(const ov::Output<Node>& in,
                                  const ov::Output<Node>& beginNode,
                                  const ov::Output<Node>& endNode,
                                  const ov::Output<Node>& valueNode,
                                  ngraph::helpers::PadMode padMode);

std::shared_ptr<ngraph::Node> makeBatchNormInference(const ngraph::Output<Node>& data,
                                                     double epsilon);

@@ -39,5 +39,34 @@ std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
    auto arg_pad_value = std::make_shared<ngraph::opset3::Constant>(data.get_element_type(), ngraph::Shape{}, &argPadValue);
    return std::make_shared<ngraph::opset3::Pad>(data, pads_begin, pads_end, arg_pad_value, pad_mode);
}

std::shared_ptr<ov::Node> makePad(const ov::Output<Node>& in,
                                  const ov::Output<Node>& beginNode,
                                  const ov::Output<Node>& endNode,
                                  const ov::Output<Node>& valueNode,
                                  ngraph::helpers::PadMode padMode) {
    ngraph::op::PadMode pad_mode;
    switch (padMode) {
    case ngraph::helpers::PadMode::CONSTANT:
        pad_mode = ngraph::op::PadMode::CONSTANT;
        break;
    case ngraph::helpers::PadMode::EDGE:
        pad_mode = ngraph::op::PadMode::EDGE;
        break;
    case ngraph::helpers::PadMode::REFLECT:
        pad_mode = ngraph::op::PadMode::REFLECT;
        break;
    case ngraph::helpers::PadMode::SYMMETRIC:
        pad_mode = ngraph::op::PadMode::SYMMETRIC;
        break;
    default:
        throw std::runtime_error("Can't create layer for this pad mode");
    }
    if (valueNode.get_node_shared_ptr() == nullptr)
        return std::make_shared<ov::op::v1::Pad>(in, beginNode, endNode, pad_mode);
    else
        return std::make_shared<ov::op::v1::Pad>(in, beginNode, endNode, valueNode, pad_mode);
}

}  // namespace builder
}  // namespace ngraph
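A usage sketch of the new overload, mirroring how PadLayerCPUTest::SetUp calls it; the shapes and element types here are illustrative only:

    // Parameter nodes stand in for runtime pads, exactly as the updated
    // CPU layer test wires them.
    auto data  = std::make_shared<ngraph::opset1::Parameter>(ov::element::f32, ov::Shape{3, 16, 5, 5});
    auto begin = std::make_shared<ngraph::opset1::Parameter>(ov::element::i64, ov::Shape{4});
    auto end   = std::make_shared<ngraph::opset1::Parameter>(ov::element::i64, ov::Shape{4});
    auto value = std::make_shared<ngraph::opset1::Parameter>(ov::element::f32, ov::Shape{});
    auto pad   = ngraph::builder::makePad(data, begin, end, value, ngraph::helpers::PadMode::CONSTANT);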