[GPU] Convert AvgPool to Reduce for onednn (#13037)

Jade Cho 2022-10-07 18:25:47 +09:00 committed by GitHub
parent c83ad806d9
commit 36f28e5618
6 changed files with 142 additions and 0 deletions
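
An AvgPool whose kernel covers the entire spatial extent of its input and that uses zero padding produces a single averaged value per channel, which is exactly a ReduceMean over the spatial axes. This commit adds a GPU-plugin matcher pass that rewrites such pools to ReduceMean (with keep_dims so the output rank is preserved) and registers it in the transformation pipeline on the onednn path, alongside the disabled Reduce-to-Pooling conversions. An illustrative shape sketch for the case covered by the new unit test:

    AvgPool(input [1, 3, 10, 10], kernel {10, 10}, pads {0, 0})        -> [1, 3, 1, 1]
    ReduceMean(input [1, 3, 10, 10], axes {2, 3}, keep_dims = true)    -> [1, 3, 1, 1]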


@ -590,6 +590,7 @@ dnnl::algorithm convert_activation_func(cldnn::activation_func func) {
        case cldnn::activation_func::hyperbolic_tan: return dnnl::algorithm::eltwise_tanh;
        case cldnn::activation_func::pow: return dnnl::algorithm::eltwise_pow;
        case cldnn::activation_func::sqrt: return dnnl::algorithm::eltwise_sqrt;
        case cldnn::activation_func::hard_sigmoid: return dnnl::algorithm::eltwise_hardsigmoid;
        default: throw std::runtime_error("Unsupported activation func for onednn primitive " + std::to_string(static_cast<int>(func)));
    }
}


@ -0,0 +1,55 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "convert_pooling_to_reduce.hpp"

#include <algorithm>
#include <memory>
#include <numeric>
#include <vector>

#include <ngraph/opsets/opset9.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>

ov::intel_gpu::ConvertAvgPoolingToReduce::ConvertAvgPoolingToReduce() {
    // Match any AvgPool node.
    auto m = std::make_shared<ngraph::pattern::Matcher>(ngraph::pattern::wrap_type<ngraph::opset9::AvgPool>(), "ConvertAvgPoolingToReduce");
    register_matcher(m, [&](ngraph::pattern::Matcher& m) {
        auto pool = std::dynamic_pointer_cast<ngraph::opset9::AvgPool>(m.get_match_root());
        if (!pool || transformation_callback(pool)) {
            return false;
        }

        auto kernel = pool->get_kernel();
        auto pads_begin = pool->get_pads_begin();
        auto pads_end = pool->get_pads_end();

        int64_t rank = pool->get_input_partial_shape(0).size();
        auto input_shape = pool->get_input_shape(0);
        // Check that the kernel covers the whole spatial extent of the input.
        bool has_same_spatial_size = rank > 2 && std::equal(input_shape.begin() + 2, input_shape.end(), kernel.begin());
        // Check that all pads are zero.
        bool no_padding =
            std::count(pads_begin.begin(), pads_begin.end(), 0) == static_cast<int64_t>(pads_begin.size()) &&
            std::count(pads_end.begin(), pads_end.end(), 0) == static_cast<int64_t>(pads_end.size());

        if (!has_same_spatial_size || !no_padding) {
            return false;
        }

        // Reduce over the spatial axes (2, 3, ...), keeping the reduced dims so the
        // output shape matches the pooled one (e.g. [N, C, 1, 1]).
        std::vector<int64_t> axes_shape(rank - 2);
        std::iota(axes_shape.begin(), axes_shape.end(), 2);

        auto reduce = std::make_shared<ngraph::opset9::ReduceMean>(
            pool->input_value(0),
            ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{axes_shape.size()}, axes_shape),
            true);
        reduce->set_friendly_name(pool->get_friendly_name() + "/Reduce");

        copy_runtime_info(pool, reduce);
        replace_node(pool, reduce);
        return true;
    });
}
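
Two details of the rewrite are worth noting: the ReduceMean is created with keep_dims = true, so its output keeps the rank of the pooled tensor (e.g. [1, 3, 10, 10] reduces to [1, 3, 1, 1], matching what the AvgPool would produce), and strides need no check because a kernel spanning the whole spatial extent with zero padding yields exactly one output element per channel regardless of stride.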


@ -0,0 +1,20 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ngraph/pass/graph_rewrite.hpp>
#include <transformations_visibility.hpp>

namespace ov {
namespace intel_gpu {

class ConvertAvgPoolingToReduce : public ngraph::pass::MatcherPass {
public:
    OPENVINO_RTTI("ConvertAvgPoolingToReduce", "0");
    ConvertAvgPoolingToReduce();
};

}  // namespace intel_gpu
}  // namespace ov


@ -26,6 +26,7 @@
#include <ie_algorithm.hpp>
#include "transformations/einsum_decomposition.hpp"
#include "transformations/convert_pooling_to_reduce.hpp"
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
@ -213,6 +214,7 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
        pass_config->disable<ngraph::pass::ConvertReduceSumToPooling>();
        pass_config->disable<ngraph::pass::ConvertReduceMeanToPooling>();
        pass_config->disable<ngraph::pass::ConvertReduceMaxToPooling>();
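        // Rewrite full-spatial AvgPool as ReduceMean so it can run as an onednn reduction.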
        manager.register_pass<ConvertAvgPoolingToReduce>();
    } else {
        pass_config->set_callback<ngraph::pass::ConvertReduceSumToPooling>(
            [](const_node_ptr &node) -> bool {


@ -17,6 +17,9 @@ file(GLOB_RECURSE SOURCES_MAIN
"${CMAKE_CURRENT_SOURCE_DIR}/*.h"
"${CMAKE_CURRENT_SOURCE_DIR}/*.hpp"
"${CMAKE_CURRENT_SOURCE_DIR}/*.cpp"
# ngraph graph transformation
"${CMAKE_HOME_DIRECTORY}/src/plugins/intel_gpu/src/plugin/transformations/*.hpp"
"${CMAKE_HOME_DIRECTORY}/src/plugins/intel_gpu/src/plugin/transformations/*.cpp"
)
if (MSVC)
@ -48,6 +51,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE openvino_intel_gpu_graph
    gtest_main
    gflags
    ngraph_reference
    inference_engine_transformations
    gmock)
target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}


@ -0,0 +1,60 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <string>
#include <memory>

#include <ngraph/function.hpp>
#include <ngraph/opsets/opset9.hpp>
#include <ngraph/pass/manager.hpp>

#include "plugin/transformations/convert_pooling_to_reduce.hpp"

using namespace testing;
using namespace ov::intel_gpu;

static std::shared_ptr<ov::Model> CreateFunction(const ov::Shape& input_shape,
                                                 const ov::element::Type& input_type,
                                                 const ov::Strides& strides,
                                                 const ov::Shape& pads_begin,
                                                 const ov::Shape& pads_end,
                                                 const ov::Shape& kernel,
                                                 const bool exclude_pad,
                                                 const ov::op::RoundingType rounding_type,
                                                 const ov::op::PadType pad_type) {
    const auto in = std::make_shared<ov::op::v0::Parameter>(input_type, input_shape);
    const auto avgPool = std::make_shared<ov::op::v1::AvgPool>(in,
                                                               strides,
                                                               pads_begin,
                                                               pads_end,
                                                               kernel,
                                                               exclude_pad,
                                                               rounding_type,
                                                               pad_type);
    return std::make_shared<ov::Model>(ov::NodeVector{avgPool}, ov::ParameterVector{in});
}

TEST(TransformationTests, ConvertAvgPoolToReduce) {
    ngraph::pass::Manager manager;
    manager.set_per_pass_validation(false);
    manager.register_pass<ov::intel_gpu::ConvertAvgPoolingToReduce>();

    // Global 10x10 pooling on a 10x10 input with zero padding: should be converted.
    auto func = CreateFunction(
        {1, 3, 10, 10}, ov::element::Type_t::f16,
        {1, 1}, {0, 0}, {0, 0}, {10, 10},  // stride, pads_begin, pads_end, kernel
        false, ov::op::RoundingType::FLOOR, ov::op::PadType::VALID);
    manager.run_passes(func);

    bool success = false;
    for (auto& ops : func->get_ops()) {
        std::string type_name(ops->get_type_name());
        if (type_name.find("ReduceMean") != std::string::npos) {
            success = true;
            break;
        }
    }
    ASSERT_TRUE(success);
}
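
A complementary negative case (a sketch, not part of this commit; the test name is illustrative) can reuse the same helper to check that a pool whose kernel does not cover the whole spatial extent is left untouched:

TEST(TransformationTests, ConvertAvgPoolToReduceNotApplied) {
    ngraph::pass::Manager manager;
    manager.set_per_pass_validation(false);
    manager.register_pass<ov::intel_gpu::ConvertAvgPoolingToReduce>();

    // 5x5 kernel on a 10x10 input: not a full-spatial pool, so no conversion is expected.
    auto func = CreateFunction(
        {1, 3, 10, 10}, ov::element::Type_t::f16,
        {1, 1}, {0, 0}, {0, 0}, {5, 5},  // stride, pads_begin, pads_end, kernel
        false, ov::op::RoundingType::FLOOR, ov::op::PadType::VALID);
    manager.run_passes(func);

    bool found_reduce = false;
    for (auto& ops : func->get_ops()) {
        std::string type_name(ops->get_type_name());
        if (type_name.find("ReduceMean") != std::string::npos) {
            found_reduce = true;
            break;
        }
    }
    ASSERT_FALSE(found_reduce);
}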