Added test for Pad (#1902)

* Added test for Pad

* Add builder
Liubov Batanina 2020-08-24 14:16:29 +03:00 committed by GitHub
parent c74643f6b6
commit 719797326b
7 changed files with 264 additions and 0 deletions


@@ -0,0 +1,97 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "single_layer_tests/pad.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
        InferenceEngine::Precision::FP16
};
const std::vector<std::vector<size_t>> padsBegin2D = {{0, 0}, {1, 1}, {2, 0}, {0, 3}};
const std::vector<std::vector<size_t>> padsEnd2D = {{0, 0}, {1, 1}, {0, 1}, {3, 2}};
const std::vector<float> argPadValue = {0.f, 1.f, 2.f, -1.f};
const std::vector<ngraph::helpers::PadMode> padMode = {
        ngraph::helpers::PadMode::EDGE,
        ngraph::helpers::PadMode::REFLECT,
};
const auto pad2DConstparams = testing::Combine(
        testing::ValuesIn(padsBegin2D),
        testing::ValuesIn(padsEnd2D),
        testing::ValuesIn(argPadValue),
        testing::Values(ngraph::helpers::PadMode::CONSTANT),
        testing::ValuesIn(netPrecisions),
        testing::Values(std::vector<size_t>{13, 5}),
        testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
        Pad2DConst,
        PadLayerTest,
        pad2DConstparams,
        PadLayerTest::getTestCaseName
);

const auto pad2Dparams = testing::Combine(
        testing::ValuesIn(padsBegin2D),
        testing::ValuesIn(padsEnd2D),
        testing::Values(0),
        testing::ValuesIn(padMode),
        testing::ValuesIn(netPrecisions),
        testing::Values(std::vector<size_t>{13, 5}),
        testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
        Pad2D,
        PadLayerTest,
        pad2Dparams,
        PadLayerTest::getTestCaseName
);
const std::vector<std::vector<size_t>> padsBegin4D = {{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 0, 1, 0}, {0, 3, 0, 1}};
const std::vector<std::vector<size_t>> padsEnd4D = {{0, 0, 0, 0}, {1, 1, 1, 1}, {2, 0, 0, 1}, {1, 3, 2, 0}};
const auto pad4DConstparams = testing::Combine(
        testing::ValuesIn(padsBegin4D),
        testing::ValuesIn(padsEnd4D),
        testing::ValuesIn(argPadValue),
        testing::Values(ngraph::helpers::PadMode::CONSTANT),
        testing::ValuesIn(netPrecisions),
        testing::Values(std::vector<size_t>{3, 5, 10, 11}),
        testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
        Pad4DConst,
        PadLayerTest,
        pad4DConstparams,
        PadLayerTest::getTestCaseName
);

const auto pad4Dparams = testing::Combine(
        testing::ValuesIn(padsBegin4D),
        testing::ValuesIn(padsEnd4D),
        testing::Values(0),
        testing::ValuesIn(padMode),
        testing::ValuesIn(netPrecisions),
        testing::Values(std::vector<size_t>{3, 5, 10, 11}),
        testing::Values(CommonTestUtils::DEVICE_CPU)
);

INSTANTIATE_TEST_CASE_P(
        Pad4D,
        PadLayerTest,
        pad4Dparams,
        PadLayerTest::getTestCaseName
);
} // namespace
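
The combinations above can be reused verbatim by other plugins. The following is a hypothetical sketch, not part of this commit: it assumes a sibling GPU instances file with the same vectors declared above and a CommonTestUtils::DEVICE_GPU constant.

// Hypothetical sketch (assumed GPU instances file): the 2D constant-pad
// combinations re-instantiated for another target device.
const auto pad2DConstParamsGpu = testing::Combine(
        testing::ValuesIn(padsBegin2D),
        testing::ValuesIn(padsEnd2D),
        testing::ValuesIn(argPadValue),
        testing::Values(ngraph::helpers::PadMode::CONSTANT),
        testing::ValuesIn(netPrecisions),
        testing::Values(std::vector<size_t>{13, 5}),
        testing::Values(CommonTestUtils::DEVICE_GPU)
);

INSTANTIATE_TEST_CASE_P(
        Pad2DConst,
        PadLayerTest,
        pad2DConstParamsGpu,
        PadLayerTest::getTestCaseName
);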


@@ -0,0 +1,36 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
typedef std::tuple<
        InferenceEngine::SizeVector,    // padsBegin
        InferenceEngine::SizeVector,    // padsEnd
        float,                          // argPadValue
        ngraph::helpers::PadMode,       // padMode
        InferenceEngine::Precision,     // Net precision
        InferenceEngine::SizeVector,    // Input shapes
        LayerTestsUtils::TargetDevice   // Target device name
> padLayerTestParamsSet;

namespace LayerTestsDefinitions {

class PadLayerTest : public testing::WithParamInterface<padLayerTestParamsSet>,
                     public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<padLayerTestParamsSet> obj);

protected:
    void SetUp() override;
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,56 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <tuple>
#include <vector>
#include <string>
#include <memory>
#include <functional>
#include "single_layer_tests/pad.hpp"
namespace LayerTestsDefinitions {
std::string PadLayerTest::getTestCaseName(testing::TestParamInfo<padLayerTestParamsSet> obj) {
    InferenceEngine::Precision netPrecision;
    InferenceEngine::SizeVector inputShapes, padsBegin, padsEnd;
    ngraph::helpers::PadMode padMode;
    float argPadValue;
    std::string targetDevice;
    std::tie(padsBegin, padsEnd, argPadValue, padMode, netPrecision, inputShapes, targetDevice) = obj.param;

    std::ostringstream result;
    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
    result << "padsBegin=" << CommonTestUtils::vec2str(padsBegin) << "_";
    result << "padsEnd=" << CommonTestUtils::vec2str(padsEnd) << "_";
    if (padMode == ngraph::helpers::PadMode::CONSTANT) {
        result << "Value=" << argPadValue << "_";
    }
    result << "PadMode=" << padMode << "_";
    result << "netPRC=" << netPrecision.name() << "_";
    result << "targetDevice=" << targetDevice;
    return result.str();
}
void PadLayerTest::SetUp() {
    InferenceEngine::SizeVector inputShape, padsBegin, padsEnd;
    float argPadValue;
    ngraph::helpers::PadMode padMode;
    InferenceEngine::Precision netPrecision;
    std::tie(padsBegin, padsEnd, argPadValue, padMode, netPrecision, inputShape, targetDevice) = this->GetParam();

    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
    auto paramOuts = ngraph::helpers::convert2OutputVector(
            ngraph::helpers::castOps2Nodes<ngraph::opset3::Parameter>(params));
    auto pad = ngraph::builder::makePad(paramOuts[0], padsBegin, padsEnd, argPadValue, padMode);
    ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(pad)};
    function = std::make_shared<ngraph::Function>(results, params, "pad");
}

TEST_P(PadLayerTest, CompareWithRefs) {
    Run();
}
} // namespace LayerTestsDefinitions


@@ -328,5 +328,11 @@ std::shared_ptr<ngraph::Node> makeFullyConnected(const ngraph::Output<Node>& in,
std::shared_ptr<ngraph::Node> makeConcat(const std::vector<ngraph::Output<Node>>& in,
                                         const int& axis);

std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
                                      const std::vector<size_t>& padsBegin,
                                      const std::vector<size_t>& padsEnd,
                                      float argPadValue,
                                      ngraph::helpers::PadMode padMode);
} // namespace builder
} // namespace ngraph


@@ -165,7 +165,15 @@ enum class InputLayerType {
    PARAMETER,
};

enum class PadMode {
    CONSTANT,
    EDGE,
    REFLECT,
    SYMMETRIC,
};

std::ostream &operator<<(std::ostream &os, const ReductionType &m);

std::ostream &operator<<(std::ostream &os, const PadMode &m);

inline std::string quantizationGranularityToString(const QuantizationGranularity &granularity) {
    static std::map<QuantizationGranularity, std::string> names = {


@@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include "ngraph_functions/builders.hpp"
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
                                      const std::vector<size_t>& padsBegin,
                                      const std::vector<size_t>& padsEnd,
                                      float argPadValue,
                                      ngraph::helpers::PadMode padMode) {
    ngraph::op::PadMode pad_mode;
    switch (padMode) {
        case ngraph::helpers::PadMode::CONSTANT:
            pad_mode = ngraph::op::PadMode::CONSTANT;
            break;
        case ngraph::helpers::PadMode::EDGE:
            pad_mode = ngraph::op::PadMode::EDGE;
            break;
        case ngraph::helpers::PadMode::REFLECT:
            pad_mode = ngraph::op::PadMode::REFLECT;
            break;
        case ngraph::helpers::PadMode::SYMMETRIC:
            pad_mode = ngraph::op::PadMode::SYMMETRIC;
            break;
        default:
            throw std::runtime_error("Can't create layer for this pad mode");
    }

    auto pads_begin = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64,
                                                                 ngraph::Shape{padsBegin.size()}, padsBegin.data());
    auto pads_end = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64,
                                                               ngraph::Shape{padsEnd.size()}, padsEnd.data());
    auto arg_pad_value = std::make_shared<ngraph::opset3::Constant>(data.get_element_type(), ngraph::Shape{}, &argPadValue);

    return std::make_shared<ngraph::opset3::Pad>(data, pads_begin, pads_end, arg_pad_value, pad_mode);
}
} // namespace builder
} // namespace ngraph
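
Outside the test harness, the builder can also be used to assemble a standalone ngraph::Function. Below is a minimal sketch, not part of this commit: the buildPadExample name, the input shape, and the pad vectors are illustrative assumptions.

// Minimal usage sketch, illustrative only: wrap the new makePad helper in a
// standalone ngraph::Function. Shapes and pad vectors are arbitrary examples.
#include <memory>
#include <vector>

#include "ngraph_functions/builders.hpp"

std::shared_ptr<ngraph::Function> buildPadExample() {
    auto param = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32,
                                                             ngraph::Shape{3, 5, 10, 11});
    // REFLECT mode ignores the pad value, so 0.f is only a placeholder here.
    auto pad = ngraph::builder::makePad(param, {0, 0, 1, 1}, {0, 0, 2, 2}, 0.f,
                                        ngraph::helpers::PadMode::REFLECT);
    auto result = std::make_shared<ngraph::opset3::Result>(pad);
    return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                              ngraph::ParameterVector{param}, "PadExample");
}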


@@ -47,6 +47,24 @@ std::ostream &operator<<(std::ostream &os, const ReductionType &m) {
    return os;
}

std::ostream &operator<<(std::ostream &os, const PadMode &m) {
    switch (m) {
        case PadMode::CONSTANT:
            os << "CONSTANT";
            break;
        case PadMode::EDGE:
            os << "EDGE";
            break;
        case PadMode::REFLECT:
            os << "REFLECT";
            break;
        case PadMode::SYMMETRIC:
            os << "SYMMETRIC";
            break;
    }
    return os;
}

OutputVector convert2OutputVector(const std::vector<std::shared_ptr<Node>> &nodes) {
    OutputVector outs;
    std::for_each(nodes.begin(), nodes.end(), [&outs](const std::shared_ptr<Node> &n) {