[CPU] ROIAlign: dynamic shapes support (#8571)

This commit is contained in:
Vladislav Golubev 2021-11-26 18:45:51 +03:00 committed by GitHub
parent 734185c04c
commit f59ece3cde
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 157 additions and 73 deletions

View File

@ -25,11 +25,7 @@ using ngPoolingMode = ngraph::op::v3::ROIAlign::PoolingMode;
bool MKLDNNROIAlignNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept { bool MKLDNNROIAlignNode::isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try { try {
if (isDynamicNgraphNode(op)) { auto roiAlign = ngraph::as_type_ptr<const ngraph::opset3::ROIAlign>(op);
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
const auto roiAlign = std::dynamic_pointer_cast<const ngraph::opset3::ROIAlign>(op);
if (!roiAlign) { if (!roiAlign) {
errorMessage = "Only opset3 ROIAlign operation is supported"; errorMessage = "Only opset3 ROIAlign operation is supported";
return false; return false;
@ -52,7 +48,7 @@ MKLDNNROIAlignNode::MKLDNNROIAlignNode(const std::shared_ptr<ngraph::Node>& op,
if (isSupportedOperation(op, errorMessage)) { if (isSupportedOperation(op, errorMessage)) {
errorPrefix = "ROIPooling layer with name '" + getName() + "' "; errorPrefix = "ROIPooling layer with name '" + getName() + "' ";
const auto roiAlign = std::dynamic_pointer_cast<const ngraph::opset3::ROIAlign>(op); auto roiAlign = ngraph::as_type_ptr<const ngraph::opset3::ROIAlign>(op);
pooledH = roiAlign->get_pooled_h(); pooledH = roiAlign->get_pooled_h();
pooledW = roiAlign->get_pooled_w(); pooledW = roiAlign->get_pooled_w();
spatialScale = roiAlign->get_spatial_scale(); spatialScale = roiAlign->get_spatial_scale();
@ -93,15 +89,15 @@ void MKLDNNROIAlignNode::getSupportedDescriptors() {
IE_THROW() << errorPrefix << "doesn't support output with rank: " << getOutputShapeAtPort(0).getRank(); IE_THROW() << errorPrefix << "doesn't support output with rank: " << getOutputShapeAtPort(0).getRank();
} }
if (getInputShapeAtPort(1).getStaticDims()[1] != 4) { const auto& proposalsDims = getInputShapeAtPort(1).getDims();
IE_THROW() << errorPrefix << "has invalid shape on 1st input: [" if (proposalsDims[1] != 4) {
<< getInputShapeAtPort(1).getStaticDims()[0] << "," << getInputShapeAtPort(1).getStaticDims()[1] << "]"; IE_THROW() << errorPrefix << "has invalid shape on 1st input: [" << proposalsDims[0] << "," << proposalsDims[1] << "]";
} }
if (getInputShapeAtPort(1).getStaticDims()[0] != getInputShapeAtPort(2).getStaticDims()[0]) { const auto& indexesDims = getInputShapeAtPort(2).getDims();
if (!dimsEqualWeak(proposalsDims[0], indexesDims[0])) {
IE_THROW() << errorPrefix << "has different sizes of inputs for proposals (" IE_THROW() << errorPrefix << "has different sizes of inputs for proposals ("
<< getInputShapeAtPort(1).getStaticDims()[0] << ") and indexes (" << proposalsDims[0] << ") and indexes (" << indexesDims[0] << ")";
<< getInputShapeAtPort(2).getStaticDims()[0] << ")";
} }
} }
@ -368,6 +364,18 @@ bool MKLDNNROIAlignNode::created() const {
return getType() == ROIAlign; return getType() == ROIAlign;
} }
void MKLDNNROIAlignNode::createPrimitive() {} bool MKLDNNROIAlignNode::needPrepareParams() const {
return false;
}
void MKLDNNROIAlignNode::executeDynamicImpl(mkldnn::stream strm) {
return execute(strm);
}
void MKLDNNROIAlignNode::createPrimitive() {
if (inputShapesDefined()) {
updateLastInputDims();
}
}
REG_MKLDNN_PRIM_FOR(MKLDNNROIAlignNode, ROIAlign) REG_MKLDNN_PRIM_FOR(MKLDNNROIAlignNode, ROIAlign)

View File

@ -23,6 +23,9 @@ public:
void execute(mkldnn::stream strm) override; void execute(mkldnn::stream strm) override;
bool created() const override; bool created() const override;
bool needPrepareParams() const override;
void executeDynamicImpl(mkldnn::stream strm) override;
static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept; static bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept;
private: private:

View File

@ -3,70 +3,129 @@
// //
#include "test_utils/cpu_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp"
#include "functional_test_utils/ov_tensor_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp"
using namespace InferenceEngine; using namespace InferenceEngine;
using namespace CPUTestUtils; using namespace CPUTestUtils;
using namespace ov::test;
namespace CPULayerTestsDefinitions { namespace CPULayerTestsDefinitions {
namespace { using ROIAlignShapes = std::vector<InputShape>;
int pooledH;
int pooledW;
float spatialScale;
int samplingRatio;
std::pair<std::vector<float>, std::vector<size_t>> proposal;
std::string mode;
std::vector<size_t> inputShape;
} // namespace
typedef std::tuple< using ROIAlignSpecificParams = std::tuple<
int, // bin's column count int, // bin's column count
int, // bin's row count int, // bin's row count
float, // scale for given region considering actual input size float, // scale for given region considering actual input size
int, // pooling ratio int, // pooling ratio
std::pair<std::vector<float>, std::vector<size_t>>, // united proposal vector of coordinates and indexes
std::string, // pooling mode std::string, // pooling mode
std::vector<size_t> // feature map shape ROIAlignShapes
> ROIAlignSpecificParams; >;
typedef std::tuple< using ROIAlignLayerTestParams = std::tuple<
ROIAlignSpecificParams, ROIAlignSpecificParams,
InferenceEngine::Precision, // Net precision ElementType, // Net precision
LayerTestsUtils::TargetDevice // Device name LayerTestsUtils::TargetDevice // Device name
> ROIAlignLayerTestParams; >;
typedef std::tuple< using ROIAlignLayerCPUTestParamsSet = std::tuple<
CPULayerTestsDefinitions::ROIAlignLayerTestParams, CPULayerTestsDefinitions::ROIAlignLayerTestParams,
CPUSpecificParams> ROIAlignLayerCPUTestParamsSet; CPUSpecificParams>;
class ROIAlignLayerCPUTest : public testing::WithParamInterface<ROIAlignLayerCPUTestParamsSet>, class ROIAlignLayerCPUTest : public testing::WithParamInterface<ROIAlignLayerCPUTestParamsSet>,
virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { public SubgraphBaseTest, public CPUTestsBase {
public: public:
static std::string getTestCaseName(testing::TestParamInfo<ROIAlignLayerCPUTestParamsSet> obj) { static std::string getTestCaseName(testing::TestParamInfo<ROIAlignLayerCPUTestParamsSet> obj) {
CPULayerTestsDefinitions::ROIAlignLayerTestParams basicParamsSet; CPULayerTestsDefinitions::ROIAlignLayerTestParams basicParamsSet;
CPUSpecificParams cpuParams; CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = obj.param; std::tie(basicParamsSet, cpuParams) = obj.param;
std::string td; std::string td;
Precision netPr; ElementType netPrecision;
ROIAlignSpecificParams roiPar; ROIAlignSpecificParams roiPar;
std::tie(roiPar, netPr, td) = basicParamsSet; std::tie(roiPar, netPrecision, td) = basicParamsSet;
std::tie(pooledH, pooledW, spatialScale, samplingRatio,
proposal, mode, inputShape) = roiPar; int pooledH;
int pooledW;
float spatialScale;
int samplingRatio;
std::string mode;
ROIAlignShapes inputShapes;
std::tie(pooledH, pooledW, spatialScale, samplingRatio, mode, inputShapes) = roiPar;
std::ostringstream result; std::ostringstream result;
result << "ROIAlignTest_";
result << std::to_string(obj.index); result << netPrecision << "_IS=";
for (const auto& shape : inputShapes) {
result << CommonTestUtils::partialShape2str({ shape.first }) << "_";
}
result << "TS=";
for (const auto& shape : inputShapes) {
result << "(";
for (const auto& targetShape : shape.second) {
result << CommonTestUtils::vec2str(targetShape) << "_";
}
result << ")_";
}
result << "pooledH=" << pooledH << "_"; result << "pooledH=" << pooledH << "_";
result << "pooledW=" << pooledW << "_"; result << "pooledW=" << pooledW << "_";
result << "spatialScale=" << spatialScale << "_"; result << "spatialScale=" << spatialScale << "_";
result << "samplingRatio=" << samplingRatio << "_"; result << "samplingRatio=" << samplingRatio << "_";
result << (netPr == Precision::FP32 ? "FP32" : "BF16") << "_";
result << mode << "_"; result << mode << "_";
result << CPUTestsBase::getTestCaseName(cpuParams); result << CPUTestsBase::getTestCaseName(cpuParams);
return result.str(); return result.str();
} }
protected: protected:
void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
inputs.clear();
const auto& funcInputs = function->inputs();
ov::runtime::Tensor data_tensor;
const auto& dataPrecision = funcInputs[0].get_element_type();
const auto& dataShape = targetInputStaticShapes.front();
data_tensor = ov::test::utils::create_and_fill_tensor(dataPrecision, dataShape, 10, 0, 1000);
const auto& coordsET = funcInputs[1].get_element_type();
auto coordsTensor = ov::runtime::Tensor{ coordsET, targetInputStaticShapes[1] };
if (coordsET == ElementType::f32) {
auto coordsTensorData = static_cast<float*>(coordsTensor.data());
for (size_t i = 0; i < coordsTensor.get_size(); i += 4) {
coordsTensorData[i] = 1.f;
coordsTensorData[i + 1] = 1.f;
coordsTensorData[i + 2] = 19.f;
coordsTensorData[i + 3] = 19.f;
}
} else if (coordsET == ElementType::bf16) {
auto coordsTensorData = static_cast<std::int16_t*>(coordsTensor.data());
for (size_t i = 0; i < coordsTensor.get_size(); i += 4) {
coordsTensorData[i] = static_cast<std::int16_t>(ngraph::bfloat16(1.f).to_bits());
coordsTensorData[i + 1] = static_cast<std::int16_t>(ngraph::bfloat16(1.f).to_bits());
coordsTensorData[i + 2] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
coordsTensorData[i + 3] = static_cast<std::int16_t>(ngraph::bfloat16(19.f).to_bits());
}
} else {
IE_THROW() << "roi align. Unsupported precision: " << coordsET;
}
auto roisIdxTensor = ov::runtime::Tensor{ funcInputs[2].get_element_type(), targetInputStaticShapes[2] };
auto roisIdxTensorData = static_cast<std::int32_t*>(roisIdxTensor.data());
if (roisIdxTensor.get_size() == 1) {
roisIdxTensorData[0] = 1;
} else if (roisIdxTensor.get_size() == 2) {
roisIdxTensorData[0] = 0;
roisIdxTensorData[1] = 1;
} else {
IE_THROW() << "Unexpected roiIdx size: " << roisIdxTensor.get_size();
}
inputs.insert({ funcInputs[0].get_node_shared_ptr(), data_tensor });
inputs.insert({ funcInputs[1].get_node_shared_ptr(), coordsTensor });
inputs.insert({ funcInputs[2].get_node_shared_ptr(), roisIdxTensor });
}
void SetUp() override { void SetUp() override {
CPULayerTestsDefinitions::ROIAlignLayerTestParams basicParamsSet; CPULayerTestsDefinitions::ROIAlignLayerTestParams basicParamsSet;
CPUSpecificParams cpuParams; CPUSpecificParams cpuParams;
@ -74,36 +133,38 @@ protected:
std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
CPULayerTestsDefinitions::ROIAlignSpecificParams roiAlignParams; CPULayerTestsDefinitions::ROIAlignSpecificParams roiAlignParams;
auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; ElementType inputPrecision;
std::tie(roiAlignParams, netPrecision, targetDevice) = basicParamsSet; std::tie(roiAlignParams, inputPrecision, targetDevice) = basicParamsSet;
inPrc = outPrc = netPrecision;
std::tie(pooledH, pooledW, spatialScale, samplingRatio,
proposal, mode, inputShape) = roiAlignParams;
std::vector<float> proposalVector = proposal.first; int pooledH;
std::vector<size_t> roiIdxVector = proposal.second; int pooledW;
float spatialScale;
int samplingRatio;
std::string mode;
ROIAlignShapes inputShapes;
std::tie(pooledH, pooledW, spatialScale, samplingRatio, mode, inputShapes) = roiAlignParams;
ngraph::Shape coordsShape = { proposalVector.size() / 4, 4 }; init_input_shapes(inputShapes);
ngraph::Shape idxVectorShape = { roiIdxVector.size() };
auto roisIdx = ngraph::builder::makeConstant<size_t>(ngraph::element::i32, idxVectorShape, roiIdxVector); auto float_params = ngraph::builder::makeDynamicParams(inputPrecision, { inputDynamicShapes[0], inputDynamicShapes[1] });
auto coords = ngraph::builder::makeConstant<float>(ngraph::element::f32, coordsShape, proposalVector); auto int_params = ngraph::builder::makeDynamicParams(ngraph::element::i32, { inputDynamicShapes[2] });
auto params = ngraph::builder::makeParams(ngraph::element::f32, {inputShape});
auto roialign = std::make_shared<ngraph::opset3::ROIAlign>(params[0], coords, roisIdx, pooledH, pooledW, auto roialign = std::make_shared<ngraph::opset3::ROIAlign>(float_params[0], float_params[1], int_params[0], pooledH, pooledW,
samplingRatio, spatialScale, mode); samplingRatio, spatialScale, mode);
roialign->get_rt_info() = getCPUInfo();
selectedType = std::string("unknown_") + inPrc.name();
threshold = 1e-2; selectedType = makeSelectedTypeStr("unknown", inputPrecision);
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(roialign)}; if (inputPrecision == ElementType::bf16) {
function = std::make_shared<ngraph::Function>(results, params, "ROIAlign"); rel_threshold = 1e-2;
}
ngraph::ParameterVector params{ float_params[0], float_params[1], int_params[0] };
function = makeNgraphFunction(inputPrecision, params, roialign, "ROIAlign");
} }
}; };
TEST_P(ROIAlignLayerCPUTest, CompareWithRefs) { TEST_P(ROIAlignLayerCPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED() SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run(); run();
CheckPluginRelatedResults(executableNetwork, "ROIAlign"); CheckPluginRelatedResults(executableNetwork, "ROIAlign");
} }
@ -122,9 +183,9 @@ std::vector<CPUSpecificParams> filterCPUInfoForDevice() {
return resCPUParams; return resCPUParams;
} }
const std::vector<InferenceEngine::Precision> netPrecisions = { const std::vector<ElementType> netPrecisions = {
InferenceEngine::Precision::FP32, ElementType::f32,
InferenceEngine::Precision::BF16 ElementType::bf16,
}; };
const std::vector<int> spatialBinXVector = { 2 }; const std::vector<int> spatialBinXVector = { 2 };
@ -140,17 +201,30 @@ const std::vector<std::string> modeVector = {
"max" "max"
}; };
const std::vector<std::vector<size_t>> inputShapeVector = { const std::vector<ROIAlignShapes> inputShapeVector = {
SizeVector({ 2, 18, 20, 20 }), ROIAlignShapes{{{}, {{ 2, 18, 20, 20 }}}, {{}, {{2, 4}}}, {{}, {{2}}}},
SizeVector({ 2, 4, 20, 20 }), ROIAlignShapes{{{}, {{ 2, 4, 20, 20 }}}, {{}, {{2, 4}}}, {{}, {{2}}}},
SizeVector({ 2, 4, 20, 40 }), ROIAlignShapes{{{}, {{ 2, 4, 20, 40 }}}, {{}, {{2, 4}}}, {{}, {{2}}}},
SizeVector({ 10, 1, 20, 20 }) ROIAlignShapes{{{}, {{ 10, 1, 20, 20 }}}, {{}, {{2, 4}}}, {{}, {{2}}}},
}; ROIAlignShapes{{{}, {{ 2, 18, 20, 20 }}}, {{}, {{1, 4}}}, {{}, {{1}}}},
ROIAlignShapes{{{}, {{ 2, 4, 20, 20 }}}, {{}, {{1, 4}}}, {{}, {{1}}}},
ROIAlignShapes{{{}, {{ 2, 4, 20, 40 }}}, {{}, {{1, 4}}}, {{}, {{1}}}},
const std::vector<std::pair<std::vector<float>, std::vector<size_t>>> propVector = { ROIAlignShapes{{{}, {{ 10, 1, 20, 20 }}}, {{}, {{1, 4}}}, {{}, {{1}}}},
{{ 1, 1, 19, 19, 1, 1, 19, 19, }, { 0, 1 }}, ROIAlignShapes{
{{ 1, 1, 19, 19 }, { 1 }} {{-1, -1, -1, -1}, {{ 10, 1, 20, 20 }, { 2, 4, 20, 20 }, { 2, 18, 20, 20 }}},
{{-1, 4}, {{1, 4}, {2, 4}, {1, 4}}},
{{-1}, {{1}, {2}, {1}}}
},
ROIAlignShapes{
{{{2, 10}, { 1, 5 }, -1, -1}, {{ 2, 1, 20, 20 }, { 10, 5, 30, 20 }, { 4, 4, 40, 40 }}},
{{-1, 4}, {{2, 4}, {2, 4}, {1, 4}}},
{{-1}, {{2}, {2}, {1}}}
},
ROIAlignShapes{
{{{2, 10}, {1, 18}, {10, 30}, {15, 25}}, {{ 10, 1, 10, 15 }, { 2, 4, 20, 20 }, { 7, 18, 30, 25 }}},
{{{1, 2}, 4}, {{1, 4}, {2, 4}, {1, 4}}},
{{{1, 2}}, {{1}, {2}, {1}}}
},
}; };
const auto roiAlignParams = ::testing::Combine( const auto roiAlignParams = ::testing::Combine(
@ -158,7 +232,6 @@ const auto roiAlignParams = ::testing::Combine(
::testing::ValuesIn(spatialBinYVector), // bin's row count ::testing::ValuesIn(spatialBinYVector), // bin's row count
::testing::ValuesIn(spatialScaleVector), // scale for given region considering actual input size ::testing::ValuesIn(spatialScaleVector), // scale for given region considering actual input size
::testing::ValuesIn(poolingRatioVector), // pooling ratio for bin ::testing::ValuesIn(poolingRatioVector), // pooling ratio for bin
::testing::ValuesIn(propVector), // united vector of coordinates and batch id's
::testing::ValuesIn(modeVector), // pooling mode ::testing::ValuesIn(modeVector), // pooling mode
::testing::ValuesIn(inputShapeVector) // feature map shape ::testing::ValuesIn(inputShapeVector) // feature map shape
); );