diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/non_max_suppression.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/non_max_suppression.cpp
new file mode 100644
index 00000000000..a8150f53304
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/non_max_suppression.cpp
@@ -0,0 +1,42 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/non_max_suppression.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+using namespace InferenceEngine;
+using namespace ngraph;
+
+const std::vector<InputShapeParams> inShapeParams = {
+    InputShapeParams{3, 100, 5},
+    InputShapeParams{1, 10, 50},
+    InputShapeParams{2, 50, 50}
+};
+
+const std::vector<int32_t> maxOutBoxPerClass = {5, 20};
+const std::vector<float> threshold = {0.3f, 0.7f};
+const std::vector<float> sigmaThreshold = {0.0f, 0.5f};
+const std::vector<op::v5::NonMaxSuppression::BoxEncodingType> encodType = {op::v5::NonMaxSuppression::BoxEncodingType::CENTER,
+                                                                           op::v5::NonMaxSuppression::BoxEncodingType::CORNER};
+const std::vector<bool> sortResDesc = {true, false};
+const std::vector<element::Type> outType = {element::i32, element::i64};
+
+const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams),
+                                          ::testing::Combine(::testing::Values(Precision::FP32),
+                                                             ::testing::Values(Precision::I32),
+                                                             ::testing::Values(Precision::FP32)),
+                                          ::testing::ValuesIn(maxOutBoxPerClass),
+                                          ::testing::ValuesIn(threshold),
+                                          ::testing::ValuesIn(threshold),
+                                          ::testing::ValuesIn(sigmaThreshold),
+                                          ::testing::ValuesIn(encodType),
+                                          ::testing::ValuesIn(sortResDesc),
+                                          ::testing::ValuesIn(outType),
+                                          ::testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_NmsLayerTest, NmsLayerTest, nmsParams, NmsLayerTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp
new file mode 100644
index 00000000000..5a51f361f31
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace LayerTestsDefinitions {
+
+using InputShapeParams = std::tuple<size_t,   // Number of batches
+                                    size_t,   // Number of boxes
+                                    size_t>;  // Number of classes
+
+using InputPrecisions = std::tuple<InferenceEngine::Precision,   // boxes and scores precisions
+                                   InferenceEngine::Precision,   // max_output_boxes_per_class precision
+                                   InferenceEngine::Precision>;  // iou_threshold, score_threshold, soft_nms_sigma precisions
+
+using NmsParams = std::tuple<InputShapeParams,                                   // Input shapes
+                             InputPrecisions,                                    // Input precisions
+                             int32_t,                                            // Max output boxes per class
+                             float,                                              // IOU threshold
+                             float,                                              // Score threshold
+                             float,                                              // Soft NMS sigma
+                             ngraph::op::v5::NonMaxSuppression::BoxEncodingType, // Box encoding
+                             bool,                                               // Sort result descending
+                             ngraph::element::Type,                              // Output type
+                             std::string>;                                       // Device name
+
+class NmsLayerTest : public testing::WithParamInterface<NmsParams>, virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<NmsParams> obj);
+    void ConfigureNetwork() override;
+    void Infer() override;
+    void Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) override;
+
+protected:
+    void SetUp() override;
+
+private:
+    size_t numOfSelectedBoxes;
+};
+
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp
new file mode 100644
index 00000000000..f59d725d671
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/non_max_suppression.cpp
@@ -0,0 +1,140 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/non_max_suppression.hpp"
+
+namespace LayerTestsDefinitions {
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using namespace FuncTestUtils::PrecisionUtils;
+
+std::string NmsLayerTest::getTestCaseName(testing::TestParamInfo<NmsParams> obj) {
+    InputShapeParams inShapeParams;
+    InputPrecisions inPrecisions;
+    int32_t maxOutBoxesPerClass;
+    float iouThr, scoreThr, softNmsSigma;
+    op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
+    bool sortResDescend;
+    element::Type outType;
+    std::string targetDevice;
+    std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, iouThr, scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType, targetDevice) = obj.param;
+
+    size_t numBatches, numBoxes, numClasses;
+    std::tie(numBatches, numBoxes, numClasses) = inShapeParams;
+
+    Precision paramsPrec, maxBoxPrec, thrPrec;
+    std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;
+
+    std::ostringstream result;
+    result << "numBatches=" << numBatches << "_numBoxes=" << numBoxes << "_numClasses=" << numClasses << "_";
+    result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_";
+    result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_";
+    result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_";
+    result << "boxEncoding=" << boxEncoding << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_";
+    result << "TargetDevice=" << targetDevice;
+    return result.str();
+}
+
+void NmsLayerTest::ConfigureNetwork() {
+    const OutputsDataMap &outputMap = cnnNetwork.getOutputsInfo();
+    auto out = outputMap.begin();
+    for (size_t i = 0; i < outputMap.size(); i++) {
+        if (i < 2) {
+            TensorDesc desc(out->second->getTensorDesc().getPrecision(), SizeVector{numOfSelectedBoxes, 3},
+                            TensorDesc::getLayoutByDims(SizeVector{numOfSelectedBoxes, 3}));
+            *(out->second) = *std::make_shared<Data>(out->first, desc);
+        }
+        out++;
+    }
+}
+
+void NmsLayerTest::Infer() {
+    inferRequest = executableNetwork.CreateInferRequest();
+    inputs.clear();
+
+    size_t it = 0;
+    for (const auto &input : cnnNetwork.getInputsInfo()) {
+        const auto &info = input.second;
+        Blob::Ptr blob;
+
+        if (it == 1) {
+            blob = make_blob_with_precision(info->getTensorDesc());
+            blob->allocate();
+            CommonTestUtils::fill_data_random_float<Precision::FP32>(blob, 1, 0, 1000);
+        } else {
+            blob = GenerateInput(*info);
+        }
+        inferRequest.SetBlob(info->name(), blob);
+        inputs.push_back(blob);
+        it++;
+    }
+    inferRequest.Infer();
+}
+
+void NmsLayerTest::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<Blob::Ptr> &actualOutputs) {
+    for (int outputIndex = static_cast<int>(expectedOutputs.size()) - 1; outputIndex >=0 ; outputIndex--) {
+        const auto& expected = expectedOutputs[outputIndex];
+        const auto& actual = actualOutputs[outputIndex];
+
+        const auto &expectedBuffer = expected.data();
+        auto memory = as<MemoryBlob>(actual);
+        IE_ASSERT(memory);
+        const auto lockedMemory = memory->wmap();
+        const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
+
+        if (outputIndex == 2) {
+            if (expected.size() != actual->byteSize())
+                throw std::runtime_error("Expected and actual size 3rd output have different size");
+        }
+
+        const auto &precision = actual->getTensorDesc().getPrecision();
+        size_t size = expected.size() / actual->getTensorDesc().getPrecision().size();
+        switch (precision) {
+            case Precision::FP32:
+                LayerTestsCommon::Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer), size, threshold);
+                break;
+            case Precision::I32:
+                LayerTestsCommon::Compare(reinterpret_cast<const int32_t *>(expectedBuffer), reinterpret_cast<const int32_t *>(actualBuffer), size, 0);
+                break;
+            default:
+                FAIL() << "Comparator for " << precision << " precision isn't supported";
+        }
+    }
+}
+
+void NmsLayerTest::SetUp() {
+    InputShapeParams inShapeParams;
+    InputPrecisions inPrecisions;
+    size_t maxOutBoxesPerClass;
+    float iouThr, scoreThr, softNmsSigma;
+    op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
+    bool sortResDescend;
+    element::Type outType;
+    std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, iouThr, scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType,
+             targetDevice) = this->GetParam();
+
+    size_t numBatches, numBoxes, numClasses;
+    std::tie(numBatches, numBoxes, numClasses) = inShapeParams;
+
+    Precision paramsPrec, maxBoxPrec, thrPrec;
+    std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;
+
+    numOfSelectedBoxes = std::min(numBoxes, maxOutBoxesPerClass) * numBatches * numClasses;
+
+    const std::vector<size_t> boxesShape{numBatches, numBoxes, 4}, scoresShape{numBatches, numClasses, numBoxes};
+    auto ngPrc = convertIE2nGraphPrc(paramsPrec);
+    auto params = builder::makeParams(ngPrc, {boxesShape, scoresShape});
+    auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes<op::Parameter>(params));
+
+    auto nms = builder::makeNms(paramOuts[0], paramOuts[1], convertIE2nGraphPrc(maxBoxPrec), convertIE2nGraphPrc(thrPrec), maxOutBoxesPerClass, iouThr,
+                                scoreThr, softNmsSigma, boxEncoding, sortResDescend, outType);
+    function = std::make_shared<Function>(nms, params, "NMS");
+}
+
+TEST_P(NmsLayerTest, CompareWithRefs) {
+    Run();
+};
+
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
index 6ea47fdb33a..0d20527b186 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
@@ -268,7 +268,7 @@ void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const
     }
 }
 
-void LayerTestsCommon::ConfigureNetwork() const {
+void LayerTestsCommon::ConfigureNetwork() {
     for (const auto &in : cnnNetwork.getInputsInfo()) {
         if (inLayout != InferenceEngine::Layout::ANY) {
             in.second->setLayout(inLayout);
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
index 319e84ecab0..99b895dab00 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
@@ -190,7 +190,7 @@ protected:
         return core;
     }
 
-    void ConfigureNetwork() const;
+    virtual void ConfigureNetwork();
 
     void LoadNetwork();
 
diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
index adb1ad2ac6f..13069b516a6 100644
--- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
+++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
@@ -448,5 +448,17 @@ std::shared_ptr<ngraph::Node> makeNormalizeL2(const ngraph::Output<Node>& data,
                                               float eps,
                                               ngraph::op::EpsMode epsMode);
 
+std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node> &boxes,
+                                      const ngraph::Output<Node> &scores,
+                                      const element::Type& maxBoxesPrec,
+                                      const element::Type& thrPrec,
+                                      const int32_t &maxOutBoxesPerClass,
+                                      const float &iouThr,
+                                      const float &scoreThr,
+                                      const float &softNmsSigma,
+                                      const ngraph::op::v5::NonMaxSuppression::BoxEncodingType &boxEncoding,
+                                      const bool &sortResDescend,
+                                      const ngraph::element::Type& outType);
+
 }  // namespace builder
 }  // namespace ngraph
diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
index 3e578e92170..0be35c01454 100644
--- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
+++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
@@ -232,7 +232,7 @@ inline ngraph::NodeVector castOps2Nodes(const std::vector<std::shared_ptr<opType>> &ops) {
 
 std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
                                                            const std::vector<std::vector<std::uint8_t>> &inputs,
-                                                           std::vector<ngraph::element::Type_t> convertType = {});
+                                                           const std::vector<ngraph::element::Type_t> convertType = {});
 
 //
 // This function compares two nGraph functions and requires them to have exactly one output
diff --git a/inference-engine/tests/ngraph_functions/src/non_max_suppression.cpp b/inference-engine/tests/ngraph_functions/src/non_max_suppression.cpp
new file mode 100644
index 00000000000..d4aa3c7b4f7
--- /dev/null
+++ b/inference-engine/tests/ngraph_functions/src/non_max_suppression.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+
+std::shared_ptr<ngraph::Node> makeNms(const ngraph::Output<Node> &boxes,
+                                      const ngraph::Output<Node> &scores,
+                                      const element::Type& maxBoxesPrec,
+                                      const element::Type& thrPrec,
+                                      const int32_t &maxOutBoxesPerClass,
+                                      const float &iouThr,
+                                      const float &scoreThr,
+                                      const float &softNmsSigma,
+                                      const ngraph::op::v5::NonMaxSuppression::BoxEncodingType &boxEncoding,
+                                      const bool &sortResDescend,
+                                      const ngraph::element::Type& outType) {
+    auto maxOutBoxesPerClassNode = makeConstant(maxBoxesPrec, ngraph::Shape{}, std::vector<int32_t>{maxOutBoxesPerClass})->output(0);
+    auto iouThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{iouThr})->output(0);
+    auto scoreThrNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{scoreThr})->output(0);
+    auto softNmsSigmaNode = makeConstant(thrPrec, ngraph::Shape{}, std::vector<float>{softNmsSigma})->output(0);
+
+    return std::make_shared<ngraph::op::v5::NonMaxSuppression>(boxes, scores, maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, softNmsSigmaNode,
+                                                               boxEncoding, sortResDescend, outType);
+}
+
+}  // namespace builder
+}  // namespace ngraph
diff --git a/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp b/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp
index 5fdb43cf76b..72df48f161d 100644
--- a/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp
+++ b/inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp
@@ -77,7 +77,7 @@ OutputVector convert2OutputVector(const std::vector<std::shared_ptr<Node>> &nodes) {
 }
 
 std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
                                                            const std::vector<std::vector<std::uint8_t>> &inputs,
-                                                           std::vector<ngraph::element::Type_t> convertType) {
+                                                           const std::vector<ngraph::element::Type_t> convertType) {
     runtime::Backend::set_backend_shared_library_search_directory("");
     auto backend = runtime::Backend::create("INTERPRETER");
 
diff --git a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp
index a68eb42ea33..55719a597cc 100644
--- a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp
+++ b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp
@@ -262,7 +262,14 @@ namespace ngraph
 
             std::sort(filteredBoxes.begin(),
                       filteredBoxes.end(),
-                      [](const BoxInfo& l, const BoxInfo& r) { return l.score > r.score; });
+                      [](const BoxInfo& l, const BoxInfo& r) {
+                          return (l.score > r.score) ||
+                                 (l.score == r.score && l.batch_index < r.batch_index) ||
+                                 (l.score == r.score && l.batch_index == r.batch_index &&
+                                  l.class_index < r.class_index) ||
+                                 (l.score == r.score && l.batch_index == r.batch_index &&
+                                  l.class_index == r.class_index && l.index < r.index);
+                      });
         }
 
         size_t max_num_of_selected_indices = selected_indices_shape[0];