[IE CLDNN] 54304 fix reduce ops (#5986)

Andrei Gorbachev 2021-06-07 18:36:38 +03:00 committed by GitHub
parent 56ada41bd0
commit 64d7a40ae4
5 changed files with 189 additions and 1 deletion

View File

@ -18,6 +18,7 @@
#include "api/reduce.hpp"
#include "api/reorder.hpp"
#include "api/reshape.hpp"
namespace CLDNNPlugin {
@ -78,6 +79,28 @@ void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::
p.AddPrimitive(reducePrim);
auto resultLayerName = layerName;
auto out_dims = op->get_output_shape(0).size();
if (out_dims == 3 && !keep_dims && rank >= 4) {
resultLayerName = layerName + "_reshape";
auto out_shape = op->get_output_shape(0);
cldnn::tensor outTensor;
// Pad the 3-D reduce output back to the original rank by inserting ones.
switch (rank) {
case 6:
    outTensor = cldnn::tensor(TensorValue(out_shape[0]), TensorValue(out_shape[1]),
                              1, TensorValue(out_shape[2]), 1, 1);
    break;
case 5:
    outTensor = cldnn::tensor(TensorValue(out_shape[0]), TensorValue(out_shape[1]),
                              1, TensorValue(out_shape[2]), 1);
    break;
case 4:
    outTensor = cldnn::tensor(TensorValue(out_shape[0]), TensorValue(out_shape[1]),
                              1, TensorValue(out_shape[2]));
    break;
}
auto reshape_prim = cldnn::reshape(resultLayerName, layerName, outTensor);
p.AddPrimitive(reshape_prim);
p.AddPrimitiveToProfiler(op, resultLayerName);
}
auto reorderLayerName = layerName + "_reorder";
cldnn::format out_format = cldnn::format::any;
auto out_dt = DataTypeFromPrecision(op->get_output_element_type(0));
@ -89,7 +112,7 @@ void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::
else if (rank - rawAxes.size() <= 4)
out_format = cldnn::format::bfyx;
auto reorder_prim = cldnn::reorder(reorderLayerName, layerName, out_format, out_dt);
auto reorder_prim = cldnn::reorder(reorderLayerName, resultLayerName, out_format, out_dt);
p.AddPrimitive(reorder_prim);
p.AddPrimitiveToProfiler(op, reorderLayerName);
} else {
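A minimal, self-contained sketch of the dimension padding introduced above: the 3-D output of a reduce over a rank-4/5/6 input is expanded back to the source rank by inserting ones, mirroring the argument pattern passed to the cldnn::tensor constructors in the hunk. The helper name padReducedShape and the use of plain std::vector in place of cldnn::tensor are illustrative assumptions, not part of the plugin.

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical helper: reproduces the value pattern of the switch above with a plain vector.
std::vector<size_t> padReducedShape(const std::vector<size_t>& outShape, size_t rank) {
    // outShape is the 3-D reduce output {d0, d1, d2}.
    switch (rank) {
    case 6: return {outShape[0], outShape[1], 1, outShape[2], 1, 1};
    case 5: return {outShape[0], outShape[1], 1, outShape[2], 1};
    case 4: return {outShape[0], outShape[1], 1, outShape[2]};
    default: return outShape;
    }
}

int main() {
    // Example: a 6-D input {2, 3, 4, 5, 6, 7} reduced over axes {2, 3, 4} with
    // keep_dims = false yields {2, 3, 7}; print its padded form for ranks 4, 5 and 6.
    for (size_t rank : {4, 5, 6}) {
        for (size_t d : padReducedShape({2, 3, 7}, rank)) std::cout << d << ' ';
        std::cout << '\n';
    }
    return 0;
}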

View File

@ -0,0 +1,47 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/reduce_eltwise.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};
INSTANTIATE_TEST_CASE_P(smoke_ReduceEltwise6D, ReduceEltwiseTest,
testing::Combine(
testing::Values(std::vector<size_t>{2, 3, 4, 5, 6, 7}),
testing::Values(std::vector<int>{2, 3, 4}),
testing::Values(CommonTestUtils::OpType::VECTOR),
testing::Values(false),
testing::ValuesIn(netPrecisions),
testing::Values(CommonTestUtils::DEVICE_GPU)),
ReduceEltwiseTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_ReduceEltwise5D, ReduceEltwiseTest,
testing::Combine(
testing::Values(std::vector<size_t>{2, 3, 4, 5, 6}),
testing::Values(std::vector<int>{2, 3}),
testing::Values(CommonTestUtils::OpType::VECTOR),
testing::Values(false),
testing::ValuesIn(netPrecisions),
testing::Values(CommonTestUtils::DEVICE_GPU)),
ReduceEltwiseTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_ReduceEltwise4D, ReduceEltwiseTest,
testing::Combine(
testing::Values(std::vector<size_t>{2, 3, 4, 5}),
testing::Values(std::vector<int>{2}),
testing::Values(CommonTestUtils::OpType::VECTOR),
testing::Values(false),
testing::ValuesIn(netPrecisions),
testing::Values(CommonTestUtils::DEVICE_GPU)),
ReduceEltwiseTest::getTestCaseName);
} // namespace

View File

@ -0,0 +1,15 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/subgraph/reduce_eltwise.hpp"
namespace SubgraphTestsDefinitions {
TEST_P(ReduceEltwiseTest, CompareWithRefs) {
Run();
};
} // namespace SubgraphTestsDefinitions

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "common_test_utils/test_constants.hpp"
namespace SubgraphTestsDefinitions {
using ReduceEltwiseParamsTuple = typename std::tuple<
std::vector<size_t>, // Input shapes
std::vector<int>, // Axis to reduce order
CommonTestUtils::OpType, // Scalar or vector type axis
bool, // Keep dims
InferenceEngine::Precision, // Network precision
std::string>; // Device name
class ReduceEltwiseTest:
public testing::WithParamInterface<ReduceEltwiseParamsTuple>,
public LayerTestsUtils::LayerTestsCommon {
public:
std::shared_ptr<ngraph::Function> fn;
static std::string getTestCaseName(const testing::TestParamInfo<ReduceEltwiseParamsTuple> &obj);
protected:
void SetUp() override;
};
} // namespace SubgraphTestsDefinitions

View File

@ -0,0 +1,67 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/subgraph/reduce_eltwise.hpp"
namespace SubgraphTestsDefinitions {
std::string ReduceEltwiseTest::getTestCaseName(const testing::TestParamInfo<ReduceEltwiseParamsTuple> &obj) {
std::vector<size_t> inputShapes;
std::vector<int> axes;
CommonTestUtils::OpType opType;
bool keepDims;
InferenceEngine::Precision netPrecision;
std::string targetName;
std::tie(inputShapes, axes, opType, keepDims, netPrecision, targetName) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
result << "axes=" << CommonTestUtils::vec2str(axes) << "_";
result << "opType=" << opType << "_";
if (keepDims) result << "KeepDims_";
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetName;
return result.str();
}
void ReduceEltwiseTest::SetUp() {
std::vector<size_t> inputShape;
std::vector<int> axes;
CommonTestUtils::OpType opType;
bool keepDims;
InferenceEngine::Precision netPrecision;
std::string targetName;
std::tie(inputShape, axes, opType, keepDims, netPrecision, targetName) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
std::vector<size_t> shapeAxes;
switch (opType) {
case CommonTestUtils::OpType::SCALAR: {
if (axes.size() > 1)
FAIL() << "In reduce op if op type is scalar, 'axis' input's must contain 1 element";
break;
}
case CommonTestUtils::OpType::VECTOR: {
shapeAxes.push_back(axes.size());
break;
}
default:
FAIL() << "Reduce op doesn't support operation type: " << opType;
}
auto reductionAxesNode = std::dynamic_pointer_cast<ngraph::Node>(
std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));
auto reduce = std::make_shared<ngraph::opset3::ReduceSum>(paramOuts[0], reductionAxesNode, keepDims);
// Broadcastable constant: ones in every dimension of the 3-D reduce output
// (keep_dims is false), with dim 2 set to the input's innermost extent.
std::vector<size_t> constShape(reduce->get_output_shape(0).size(), 1);
constShape[2] = inputShape.back();
auto constant = ngraph::builder::makeConstant<float>(ngPrc, constShape, {}, true);
auto eltw = ngraph::builder::makeEltwise(reduce, constant, ngraph::helpers::EltwiseTypes::MULTIPLY);
ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(eltw)};
function = std::make_shared<ngraph::Function>(results, params, "ReduceEltwise");
}
} // namespace SubgraphTestsDefinitions
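For orientation, a rough self-contained sketch (assuming the standard nGraph opset3 API shipped with this repository) of the subgraph that SetUp builds for the smoke_ReduceEltwise6D case: a 6-D parameter reduced over axes {2, 3, 4} with keep_dims = false gives shape {2, 3, 7}, which is then multiplied by a broadcastable constant of shape {1, 1, 7}. The function name buildReduceEltwiseExample and the constant fill value are illustrative, not taken from the test.

#include <memory>
#include <vector>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

std::shared_ptr<ngraph::Function> buildReduceEltwiseExample() {
    using namespace ngraph;
    // Parameter{2,3,4,5,6,7} -> ReduceSum(axes {2,3,4}, keep_dims=false) -> Multiply.
    auto param = std::make_shared<opset3::Parameter>(element::f32, Shape{2, 3, 4, 5, 6, 7});
    auto axes = opset3::Constant::create(element::i64, Shape{3}, std::vector<int64_t>{2, 3, 4});
    auto reduce = std::make_shared<opset3::ReduceSum>(param, axes, false);   // output {2, 3, 7}
    auto scale = opset3::Constant::create(element::f32, Shape{1, 1, 7},
                                          std::vector<float>(7, 2.0f));      // broadcast over {2, 3, 7}
    auto mul = std::make_shared<opset3::Multiply>(reduce, scale);
    return std::make_shared<Function>(OutputVector{mul}, ParameterVector{param}, "ReduceEltwiseExample");
}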