[CPU] Add check to reduce for scalar dims (#1577)

This commit is contained in:
Maxim Andronov 2020-08-06 14:44:29 +03:00 committed by GitHub
parent 853cfaa038
commit 21c4312453
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 108 additions and 65 deletions

View File

@ -80,7 +80,8 @@ public:
int32_t *idx_data = inputs[REDUCE_INDEXES]->cbuffer().as<int32_t *>() +
inputs[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
SizeVector axes;
for (size_t i = 0; i < idx_dims[0]; i++) {
const size_t axesIter = idx_dims.empty() ? 1 : idx_dims[0];
for (size_t i = 0; i < axesIter; i++) {
int32_t axis = idx_data[i];
if (axis < 0)
axis += data_dims.size();

View File

@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;
namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
@ -34,9 +33,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};
std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};
std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {

View File

@ -15,14 +15,20 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
};
const std::vector<std::vector<size_t>> inputShapes = {
std::vector<size_t>{10, 20, 30, 40},
std::vector<size_t>{10, 20, 40},
std::vector<size_t>{5, 6, 10, 11},
};
const std::vector<std::vector<int>> axes = {
{0},
{0, 3},
{1, -1},
{0, 2},
{1, -1}
};
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};
const std::vector<ngraph::helpers::ReductionType> reductionTypes = {
ngraph::helpers::ReductionType::Mean,
ngraph::helpers::ReductionType::Min,
@ -34,8 +40,9 @@ const std::vector<ngraph::helpers::ReductionType> reductionTypes = {
ngraph::helpers::ReductionType::LogicalAnd,
};
const auto params = testing::Combine(
testing::ValuesIn(axes),
const auto paramsOneAxis = testing::Combine(
testing::Values(std::vector<int>{0}),
testing::ValuesIn(opTypes),
testing::Values(true, false),
testing::ValuesIn(reductionTypes),
testing::ValuesIn(netPrecisions),
@ -43,6 +50,22 @@ const auto params = testing::Combine(
testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(
ReduceOneAxis,
ReduceOpsLayerTest,
paramsOneAxis,
ReduceOpsLayerTest::getTestCaseName
);
const auto params = testing::Combine(
testing::ValuesIn(axes),
testing::Values(opTypes[1]),
testing::Values(true, false),
testing::ValuesIn(reductionTypes),
testing::ValuesIn(netPrecisions),
testing::ValuesIn(inputShapes),
testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(
Reduce,
@ -50,4 +73,5 @@ INSTANTIATE_TEST_CASE_P(
params,
ReduceOpsLayerTest::getTestCaseName
);
} // namespace

View File

@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;
namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
@ -31,9 +30,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};
std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};
std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {

View File

@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;
namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
@ -35,9 +34,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};
std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};
std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {

View File

@ -10,7 +10,6 @@
#include <vector>
using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;
namespace {
@ -36,9 +35,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};
std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};
std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {

View File

@ -12,21 +12,16 @@
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/common_layers_params.hpp"
#include "ie_core.hpp"
namespace LayerTestsDefinitions {
namespace EltwiseParams {
// Kind of the secondary (right-hand) eltwise input a test feeds in:
// SCALAR produces a single-element shape {1}, VECTOR uses the full
// second input shape. NOTE(review): this local enum appears superseded
// by CommonTestUtils::OpType elsewhere in this change — confirm before
// reusing it in new tests.
enum class OpType {
SCALAR,
VECTOR
};
} // namespace EltwiseParams
typedef std::tuple<
std::vector<std::vector<size_t>>, // input shapes
ngraph::helpers::EltwiseTypes, // eltwise op type
ngraph::helpers::InputLayerType, // secondary input type
EltwiseParams::OpType, // op type
CommonTestUtils::OpType, // op type
InferenceEngine::Precision, // Net precision
std::string, // Device name
std::map<std::string, std::string> // Additional network configuration

View File

@ -11,11 +11,13 @@
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/common_layers_params.hpp"
namespace LayerTestsDefinitions {
typedef std::tuple<
std::vector<int>, // Axis to reduce order
CommonTestUtils::OpType, // Scalar or vector type axis
bool, // Keep dims
ngraph::helpers::ReductionType, // Reduce operation type
InferenceEngine::Precision, // Net precision

View File

@ -14,25 +14,12 @@
#include "single_layer_tests/eltwise.hpp"
namespace LayerTestsDefinitions {
// Streams a human-readable label ("SCALAR"/"VECTOR") for the eltwise
// secondary-input kind; any other value raises NOT_SUPPORTED_OP_TYPE.
std::ostream& operator<<(std::ostream & os, EltwiseParams::OpType type) {
    if (type == EltwiseParams::OpType::SCALAR) {
        os << "SCALAR";
    } else if (type == EltwiseParams::OpType::VECTOR) {
        os << "VECTOR";
    } else {
        THROW_IE_EXCEPTION << "NOT_SUPPORTED_OP_TYPE";
    }
    return os;
}
std::string EltwiseLayerTest::getTestCaseName(testing::TestParamInfo<EltwiseTestParams> obj) {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
ngraph::helpers::InputLayerType secondaryInputType;
EltwiseParams::OpType opType;
CommonTestUtils::OpType opType;
ngraph::helpers::EltwiseTypes eltwiseOpType;
std::string targetName;
std::map<std::string, std::string> additional_config;
@ -52,7 +39,7 @@ void EltwiseLayerTest::SetUp() {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
ngraph::helpers::InputLayerType secondaryInputType;
EltwiseParams::OpType opType;
CommonTestUtils::OpType opType;
ngraph::helpers::EltwiseTypes eltwiseType;
std::map<std::string, std::string> additional_config;
std::tie(inputShapes, eltwiseType, secondaryInputType, opType, netPrecision, targetDevice, additional_config) = this->GetParam();
@ -73,11 +60,11 @@ void EltwiseLayerTest::SetUp() {
std::vector<size_t> shape_input_secondary;
switch (opType) {
case EltwiseParams::OpType::SCALAR: {
case CommonTestUtils::OpType::SCALAR: {
shape_input_secondary = std::vector<size_t>({1});
break;
}
case EltwiseParams::OpType::VECTOR:
case CommonTestUtils::OpType::VECTOR:
shape_input_secondary = inputShape2;
break;
default:

View File

@ -25,11 +25,13 @@ std::string ReduceOpsLayerTest::getTestCaseName(testing::TestParamInfo<reduceMea
ngraph::helpers::ReductionType reductionType;
std::vector<size_t> inputShape;
std::vector<int> axes;
CommonTestUtils::OpType opType;
std::string targetDevice;
std::tie(axes, keepDims, reductionType, netPrecision, inputShape, targetDevice) = obj.param;
std::tie(axes, opType, keepDims, reductionType, netPrecision, inputShape, targetDevice) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
result << "axes=" << CommonTestUtils::vec2str(axes) << "_";
result << "opType=" << opType << "_";
result << "type=" << reductionType << "_";
if (keepDims) result << "KeepDims_";
result << "netPRC=" << netPrecision.name() << "_";
@ -46,13 +48,32 @@ void ReduceOpsLayerTest::SetUp() {
ngraph::helpers::ReductionType reductionType;
std::vector<size_t> inputShape;
std::vector<int> axes;
std::tie(axes, keepDims, reductionType, netPrecision, inputShape, targetDevice) = GetParam();
CommonTestUtils::OpType opType;
std::tie(axes, opType, keepDims, reductionType, netPrecision, inputShape, targetDevice) = GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
const auto reduce = ngraph::builder::makeReduce(paramOuts, axes, keepDims, reductionType);
std::vector<size_t> shapeAxes;
switch (opType) {
case CommonTestUtils::OpType::SCALAR: {
if (axes.size() > 1)
FAIL() << "In reduce op if op type is scalar, 'axis' input's must contain 1 element";
break;
}
case CommonTestUtils::OpType::VECTOR: {
shapeAxes.push_back(axes.size());
break;
}
default:
FAIL() << "Reduce op doesn't support operation type: " << opType;
}
auto reductionAxesNode = std::dynamic_pointer_cast<ngraph::Node>(
std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));
const auto reduce = ngraph::builder::makeReduce(paramOuts[0], reductionAxesNode, keepDims, reductionType);
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(reduce)};
function = std::make_shared<ngraph::Function>(results, params, "Reduce");
}

View File

@ -209,4 +209,18 @@ void get_common_dims(const InferenceEngine::Blob &blob,
}
}
// Streams a human-readable label ("SCALAR"/"VECTOR") for the operand
// kind; any other value raises NOT_SUPPORTED_OP_TYPE.
std::ostream& operator<<(std::ostream & os, OpType type) {
    const char* label = nullptr;
    switch (type) {
        case OpType::SCALAR:
            label = "SCALAR";
            break;
        case OpType::VECTOR:
            label = "VECTOR";
            break;
        default:
            THROW_IE_EXCEPTION << "NOT_SUPPORTED_OP_TYPE";
    }
    os << label;
    return os;
}
} // namespace CommonTestUtils

View File

@ -96,4 +96,10 @@ void get_common_dims(const InferenceEngine::Blob &blob,
int32_t &dimz,
int32_t &dimn);
// Kind of operand a layer test constructs for a secondary input or an
// axes argument: SCALAR (single-element) or VECTOR (full-shape). Shared
// across the eltwise and reduce test suites in this commit.
enum class OpType {
SCALAR,
VECTOR
};
// Streams "SCALAR"/"VECTOR"; throws NOT_SUPPORTED_OP_TYPE on any other
// value (see the definition in common_utils.cpp within this commit).
std::ostream& operator<<(std::ostream & os, OpType type);
} // namespace CommonTestUtils

View File

@ -271,8 +271,8 @@ std::shared_ptr<Node> makeMatMul(const Output<Node> &A,
bool transpose_a = false,
bool transpose_b = false);
std::shared_ptr<ngraph::Node> makeReduce(std::vector<ngraph::Output<Node>> &in,
const std::vector<int> &reductionAxes,
std::shared_ptr<ngraph::Node> makeReduce(const ngraph::Output<Node>& data,
const ngraph::Output<Node>& axes,
bool keepDims,
ngraph::helpers::ReductionType reductionType);

View File

@ -9,30 +9,27 @@
namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeReduce(std::vector<ngraph::Output<Node>> &in,
const std::vector<int> &reductionAxes,
std::shared_ptr<ngraph::Node> makeReduce(const ngraph::Output<Node>& data,
const ngraph::Output<Node>& axes,
bool keepDims,
ngraph::helpers::ReductionType reductionType) {
auto reductionAxesNode = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64,
ngraph::Shape({reductionAxes.size()}),
reductionAxes);
switch (reductionType) {
case helpers::Mean:
return std::make_shared<ngraph::opset3::ReduceMean>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMean>(data, axes, keepDims);
case helpers::Max:
return std::make_shared<ngraph::opset3::ReduceMax>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMax>(data, axes, keepDims);
case helpers::Min:
return std::make_shared<ngraph::opset3::ReduceMin>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMin>(data, axes, keepDims);
case helpers::Prod:
return std::make_shared<ngraph::opset3::ReduceProd>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceProd>(data, axes, keepDims);
case helpers::Sum:
return std::make_shared<ngraph::opset3::ReduceSum>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceSum>(data, axes, keepDims);
case helpers::LogicalOr:
return std::make_shared<ngraph::opset3::LogicalOr>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalOr>(data, axes);
case helpers::LogicalAnd:
return std::make_shared<ngraph::opset3::LogicalAnd>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalAnd>(data, axes);
case helpers::LogicalXor:
return std::make_shared<ngraph::opset3::LogicalXor>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalXor>(data, axes);
default:
throw std::runtime_error("Can't create layer for this reduction type");
}