[IE TESTS] Added test for NMS (#2830)
parent 6467a9f5b8
commit a166f95acc
@@ -0,0 +1,42 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/non_max_suppression.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
using namespace InferenceEngine;
using namespace ngraph;

const std::vector<InputShapeParams> inShapeParams = {
    InputShapeParams{3, 100, 5},
    InputShapeParams{1, 10, 50},
    InputShapeParams{2, 50, 50}
};

const std::vector<int32_t> maxOutBoxPerClass = {5, 20};
const std::vector<float> threshold = {0.3f, 0.7f};
const std::vector<float> sigmaThreshold = {0.0f, 0.5f};
const std::vector<op::v5::NonMaxSuppression::BoxEncodingType> encodType = {op::v5::NonMaxSuppression::BoxEncodingType::CENTER,
                                                                           op::v5::NonMaxSuppression::BoxEncodingType::CORNER};
const std::vector<bool> sortResDesc = {true, false};
const std::vector<element::Type> outType = {element::i32, element::i64};

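// The inner Combine groups the three input precisions: boxes/scores, max_output_boxes_per_class, and the iou/score/sigma thresholds.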
const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams),
                                          ::testing::Combine(::testing::Values(Precision::FP32),
                                                             ::testing::Values(Precision::I32),
                                                             ::testing::Values(Precision::FP32)),
                                          ::testing::ValuesIn(maxOutBoxPerClass),
                                          ::testing::ValuesIn(threshold),
                                          ::testing::ValuesIn(threshold),
                                          ::testing::ValuesIn(sigmaThreshold),
                                          ::testing::ValuesIn(encodType),
                                          ::testing::ValuesIn(sortResDesc),
                                          ::testing::ValuesIn(outType),
                                          ::testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(smoke_NmsLayerTest, NmsLayerTest, nmsParams, NmsLayerTest::getTestCaseName);

@@ -0,0 +1,48 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <tuple>
#include <string>

#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {

using InputShapeParams = std::tuple<size_t,  // Number of batches
                                    size_t,  // Number of boxes
                                    size_t>; // Number of classes

using InputPrecisions = std::tuple<InferenceEngine::Precision,  // boxes and scores precisions
                                   InferenceEngine::Precision,  // max_output_boxes_per_class precision
                                   InferenceEngine::Precision>; // iou_threshold, score_threshold, soft_nms_sigma precisions

using NmsParams = std::tuple<InputShapeParams,                                   // Params used to create 1st and 2nd inputs
                             InputPrecisions,                                    // Input precisions
                             int32_t,                                            // Max output boxes per class
                             float,                                              // IOU threshold
                             float,                                              // Score threshold
                             float,                                              // Soft NMS sigma
                             ngraph::op::v5::NonMaxSuppression::BoxEncodingType, // Box encoding
                             bool,                                               // Sort result descending
                             ngraph::element::Type,                              // Output type
                             std::string>;                                       // Device name

class NmsLayerTest : public testing::WithParamInterface<NmsParams>, virtual public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<NmsParams> obj);
    void ConfigureNetwork() override;
    void Infer() override;
    void Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) override;

protected:
    void SetUp() override;

private:
    size_t numOfSelectedBoxes;
};

}  // namespace LayerTestsDefinitions

@@ -0,0 +1,140 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "single_layer_tests/non_max_suppression.hpp"

namespace LayerTestsDefinitions {

using namespace ngraph;
using namespace InferenceEngine;
using namespace FuncTestUtils::PrecisionUtils;

std::string NmsLayerTest::getTestCaseName(testing::TestParamInfo<NmsParams> obj) {
    InputShapeParams inShapeParams;
    InputPrecisions inPrecisions;
    int32_t maxOutBoxesPerClass;
    float iouThr, scoreThr, softNmsSigma;
    op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
    bool sortResDescend;
    element::Type outType;
    std::string targetDevice;
    std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, iouThr, scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType, targetDevice) = obj.param;

    size_t numBatches, numBoxes, numClasses;
    std::tie(numBatches, numBoxes, numClasses) = inShapeParams;

    Precision paramsPrec, maxBoxPrec, thrPrec;
    std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;

    std::ostringstream result;
    result << "numBatches=" << numBatches << "_numBoxes=" << numBoxes << "_numClasses=" << numClasses << "_";
    result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_";
    result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_";
    result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_";
    result << "boxEncoding=" << boxEncoding << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_";
    result << "TargetDevice=" << targetDevice;
    return result.str();
}

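// Override the tensor descriptors of the first two NMS outputs: their actual size is data-dependent,
// so the maximum possible shape {numOfSelectedBoxes, 3} computed in SetUp() is used instead.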
void NmsLayerTest::ConfigureNetwork() {
    const OutputsDataMap &outputMap = cnnNetwork.getOutputsInfo();
    auto out = outputMap.begin();
    for (size_t i = 0; i < outputMap.size(); i++) {
        if (i < 2) {
            TensorDesc desc(out->second->getTensorDesc().getPrecision(), SizeVector{numOfSelectedBoxes, 3},
                            TensorDesc::getLayoutByDims(SizeVector{numOfSelectedBoxes, 3}));
            *(out->second) = *std::make_shared<Data>(out->first, desc);
        }
        out++;
    }
}

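// For the second network input (scores) a random FP32 blob is generated explicitly;
// the remaining inputs use the common GenerateInput() helper.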
void NmsLayerTest::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();
    inputs.clear();

    size_t it = 0;
    for (const auto &input : cnnNetwork.getInputsInfo()) {
        const auto &info = input.second;
        Blob::Ptr blob;

        if (it == 1) {
            blob = make_blob_with_precision(info->getTensorDesc());
            blob->allocate();
            CommonTestUtils::fill_data_random_float<Precision::FP32>(blob, 1, 0, 1000);
        } else {
            blob = GenerateInput(*info);
        }
        inferRequest.SetBlob(info->name(), blob);
        inputs.push_back(blob);
        it++;
    }
    inferRequest.Infer();
}

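// Outputs are compared in reverse order; for the third output the byte sizes of the
// expected and actual buffers must also match exactly.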
void NmsLayerTest::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<Blob::Ptr> &actualOutputs) {
    for (int outputIndex = static_cast<int>(expectedOutputs.size()) - 1; outputIndex >= 0; outputIndex--) {
        const auto &expected = expectedOutputs[outputIndex];
        const auto &actual = actualOutputs[outputIndex];

        const auto &expectedBuffer = expected.data();
        auto memory = as<MemoryBlob>(actual);
        IE_ASSERT(memory);
        const auto lockedMemory = memory->wmap();
        const auto actualBuffer = lockedMemory.as<const uint8_t *>();

        if (outputIndex == 2) {
            if (expected.size() != actual->byteSize())
                throw std::runtime_error("Expected and actual sizes of the 3rd output do not match");
        }

        const auto &precision = actual->getTensorDesc().getPrecision();
        size_t size = expected.size() / actual->getTensorDesc().getPrecision().size();
        switch (precision) {
            case Precision::FP32:
                LayerTestsCommon::Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer), size, threshold);
                break;
            case Precision::I32:
                LayerTestsCommon::Compare(reinterpret_cast<const int32_t *>(expectedBuffer), reinterpret_cast<const int32_t *>(actualBuffer), size, 0);
                break;
            default:
                FAIL() << "Comparator for " << precision << " precision isn't supported";
        }
    }
}

void NmsLayerTest::SetUp() {
    InputShapeParams inShapeParams;
    InputPrecisions inPrecisions;
    size_t maxOutBoxesPerClass;
    float iouThr, scoreThr, softNmsSigma;
    op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
    bool sortResDescend;
    element::Type outType;
    std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, iouThr, scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType,
             targetDevice) = this->GetParam();

    size_t numBatches, numBoxes, numClasses;
    std::tie(numBatches, numBoxes, numClasses) = inShapeParams;

    Precision paramsPrec, maxBoxPrec, thrPrec;
    std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;

    numOfSelectedBoxes = std::min(numBoxes, maxOutBoxesPerClass) * numBatches * numClasses;

    const std::vector<size_t> boxesShape{numBatches, numBoxes, 4}, scoresShape{numBatches, numClasses, numBoxes};
    auto ngPrc = convertIE2nGraphPrc(paramsPrec);
    auto params = builder::makeParams(ngPrc, {boxesShape, scoresShape});
    auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes<op::Parameter>(params));

    auto nms = builder::makeNms(paramOuts[0], paramOuts[1], convertIE2nGraphPrc(maxBoxPrec), convertIE2nGraphPrc(thrPrec), maxOutBoxesPerClass, iouThr,
                                scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType);
    function = std::make_shared<Function>(nms, params, "NMS");
}

TEST_P(NmsLayerTest, CompareWithRefs) {
    Run();
}

}  // namespace LayerTestsDefinitions

@@ -268,7 +268,7 @@ void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const
     }
 }
 
-void LayerTestsCommon::ConfigureNetwork() const {
+void LayerTestsCommon::ConfigureNetwork() {
     for (const auto &in : cnnNetwork.getInputsInfo()) {
         if (inLayout != InferenceEngine::Layout::ANY) {
             in.second->setLayout(inLayout);
@@ -190,7 +190,7 @@ protected:
         return core;
     }
 
-    void ConfigureNetwork() const;
+    virtual void ConfigureNetwork();
 
     void LoadNetwork();
 
@@ -448,5 +448,17 @@ std::shared_ptr<ngraph::Node> makeNormalizeL2(const ngraph::Output<Node>& data,
                                               float eps,
                                               ngraph::op::EpsMode epsMode);
 
+std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node> &boxes,
+                                      const ngraph::Output<Node> &scores,
+                                      const element::Type& maxBoxesPrec,
+                                      const element::Type& thrPrec,
+                                      const int32_t &maxOutBoxesPerClass,
+                                      const float &iouThr,
+                                      const float &scoreThr,
+                                      const float &softNmsSigma,
+                                      const ngraph::op::v5::NonMaxSuppression::BoxEncodingType &boxEncoding,
+                                      const bool &sortResDescend,
+                                      const ngraph::element::Type& outType);
+
 }  // namespace builder
 }  // namespace ngraph
@@ -232,7 +232,7 @@ inline ngraph::NodeVector castOps2Nodes(const std::vector<std::shared_ptr<opType
 
 std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
                                                            const std::vector<std::vector<std::uint8_t>> &inputs,
-                                                           std::vector<ngraph::element::Type_t> convertType = {});
+                                                           const std::vector<ngraph::element::Type_t> convertType = {});
 
 //
 // This function compares two nGraph functions and requires them to have exactly one output
@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "ngraph_functions/builders.hpp"

namespace ngraph {
namespace builder {

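// makeNms wraps max_output_boxes_per_class, iou_threshold, score_threshold and soft_nms_sigma
// into scalar Constants and builds a NonMaxSuppression-5 node on top of the boxes/scores outputs.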
std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node> &boxes,
                                      const ngraph::Output<Node> &scores,
                                      const element::Type& maxBoxesPrec,
                                      const element::Type& thrPrec,
                                      const int32_t &maxOutBoxesPerClass,
                                      const float &iouThr,
                                      const float &scoreThr,
                                      const float &softNmsSigma,
                                      const ngraph::op::v5::NonMaxSuppression::BoxEncodingType &boxEncoding,
                                      const bool &sortResDescend,
                                      const ngraph::element::Type& outType) {
    auto maxOutBoxesPerClassNode = makeConstant(maxBoxesPrec, ngraph::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass})->output(0);
    auto iouThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{iouThr})->output(0);
    auto scoreThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{scoreThr})->output(0);
    auto softNmsSigmaNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{softNmsSigma})->output(0);

    return std::make_shared<ngraph::op::v5::NonMaxSuppression>(boxes, scores, maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, softNmsSigmaNode,
                                                               boxEncoding, sortResDescend, outType);
}

}  // namespace builder
}  // namespace ngraph

@@ -77,7 +77,7 @@ OutputVector convert2OutputVector(const std::vector<std::shared_ptr<Node>> &node
 }
 
 std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function, const std::vector<std::vector<std::uint8_t>> &inputs,
-                                                           std::vector<ngraph::element::Type_t> convertType) {
+                                                           const std::vector<ngraph::element::Type_t> convertType) {
     runtime::Backend::set_backend_shared_library_search_directory("");
     auto backend = runtime::Backend::create("INTERPRETER");
 
@@ -262,7 +262,14 @@ namespace ngraph
            {
                std::sort(filteredBoxes.begin(),
                          filteredBoxes.end(),
-                         [](const BoxInfo& l, const BoxInfo& r) { return l.score > r.score; });
+                         [](const BoxInfo& l, const BoxInfo& r) {
+                             return (l.score > r.score) ||
+                                    (l.score == r.score && l.batch_index < r.batch_index) ||
+                                    (l.score == r.score && l.batch_index == r.batch_index &&
+                                     l.class_index < r.class_index) ||
+                                    (l.score == r.score && l.batch_index == r.batch_index &&
+                                     l.class_index == r.class_index && l.index < r.index);
+                         });
            }
 
            size_t max_num_of_selected_indices = selected_indices_shape[0];