[GPU] AvgPool/MaxPool dynamic support (#13754)

Roman Lyamin 2022-11-07 15:46:17 +04:00 committed by GitHub
parent 713eb9683f
commit 34b76584f7
8 changed files with 719 additions and 183 deletions

View File

@ -10,6 +10,8 @@
#include "openvino/core/shape.hpp"
#include "openvino/core/strides.hpp"
#include "openvino/op/util/attr_types.hpp"
namespace cldnn {
/// @addtogroup cpp_api C++ API
/// @{
@ -51,13 +53,17 @@ struct pooling : public primitive_base<pooling> {
const ov::Strides& stride,
const ov::Shape& pads_begin = {0, 0},
const ov::Shape& pads_end = {0, 0},
ov::op::PadType auto_pad = ov::op::PadType::EXPLICIT,
ov::op::RoundingType rounding_type = ov::op::RoundingType::FLOOR,
const padding& output_padding = padding())
: primitive_base(id, {input}, output_padding),
mode(static_cast<pooling_mode>(mode)),
size(size),
stride(stride),
pads_begin(pads_begin),
pads_end(pads_end),
auto_pad(auto_pad),
rounding_type(rounding_type),
with_output_size(false) {}
/// @brief Constructs pooling primitive with known output shape.
@ -80,10 +86,12 @@ struct pooling : public primitive_base<pooling> {
const padding& output_padding = padding())
: primitive_base(id, {input}, output_padding, optional_data_type{output_data_type}),
mode(static_cast<pooling_mode>(mode)),
size(size),
stride(stride),
pads_begin(pads_begin),
pads_end(pads_end),
auto_pad(ov::op::PadType::EXPLICIT),
rounding_type(ov::op::RoundingType::CEIL),
with_output_size(true),
output_size(output_size) {}
@ -107,6 +115,8 @@ struct pooling : public primitive_base<pooling> {
const ov::Strides& dilation,
const ov::Shape& pads_begin,
const ov::Shape& pads_end,
ov::op::PadType auto_pad,
ov::op::RoundingType rounding_type,
int64_t axis,
data_types index_element_type,
tensor output_size,
@ -115,14 +125,16 @@ struct pooling : public primitive_base<pooling> {
: primitive_base(id, {input, indices_output}, output_padding, optional_data_type{output_data_type}),
indices_output(indices_output),
mode(pooling_mode::max),
size(size),
stride(stride),
dilation(dilation),
pads_begin(pads_begin),
pads_end(pads_end),
auto_pad(auto_pad),
rounding_type(rounding_type),
axis(axis),
with_output_size(true),
output_size(output_size),
index_element_type(index_element_type),
maxPoolOpset8Features(true) {}
@ -130,22 +142,26 @@ struct pooling : public primitive_base<pooling> {
primitive_id indices_output;
/// @brief Pooling mode.
pooling_mode mode;
/// @brief Pooling kernel size.
ov::Shape size;
/// @brief Defines shift in input buffer between adjacent calculations of output values.
ov::Strides stride;
/// @brief Defines index of next pixel to select when pooling
ov::Strides dilation;
/// @brief Defines logical pad value added to input tensor.
ov::Shape pads_begin;
/// @brief Defines a shift, relative to the end of padding shape.
ov::Shape pads_end;
/// @brief Defines how the padding is calculated.
ov::op::PadType auto_pad;
/// @brief Defines a type of rounding to be applied.
ov::op::RoundingType rounding_type;
/// @brief first dimension of input that should be used to calculate the upper bound of index output.
int64_t axis = 0;
/// @brief Indicates that the primitive has user-defined output size (non-zero value).
bool with_output_size;
/// @brief User-defined output data size of the primitive (w/o padding).
tensor output_size;
/// @brief type of index output
data_types index_element_type = data_types::i32;
bool maxPoolOpset8Features{false};
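For reference, a minimal sketch of how a caller might use the extended constructor; the "pool" and "input" identifiers are hypothetical, the argument order is the one declared above:

// Kernel 2x2, stride 2; padding resolved from SAME_UPPER, output rounded with CEIL.
cldnn::pooling pool("pool",
                    "input",
                    cldnn::pooling_mode::max,
                    ov::Shape{2, 2},     // size
                    ov::Strides{2, 2},   // stride
                    ov::Shape{0, 0},     // pads_begin (recomputed for SAME_UPPER)
                    ov::Shape{0, 0},     // pads_end
                    ov::op::PadType::SAME_UPPER,
                    ov::op::RoundingType::CEIL);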

View File

@ -9,6 +9,7 @@
#include "kernel_selector_helper.h"
#include "pooling/pooling_kernel_selector.h"
#include "pooling/pooling_kernel_base.h"
#include "ngraph/validation_util.hpp"
#include <algorithm>
namespace cldnn {
@ -18,8 +19,8 @@ namespace {
void validate_args(const pooling_node& arg) {
auto input_rank = arg.input().get_output_layout().get_spatial_rank();
auto output_rank = arg.get_output_layout().get_spatial_rank();
auto stride_rank = std::max(arg.get_primitive()->stride.size(), static_cast<size_t>(2));
auto window_rank = std::max(arg.get_primitive()->size.size(), static_cast<size_t>(2));
CLDNN_ERROR_NOT_EQUAL(arg.id(), "input dimensions", input_rank, "output dimensions", output_rank, "");
CLDNN_ERROR_NOT_EQUAL(arg.id(), "stride dimensions", stride_rank, "output dimensions", output_rank, "");
@ -93,27 +94,54 @@ public:
pool_params.poolAxis = primitive->axis;
}
const auto& input_layout = impl_param.input_layouts[0];
const auto& output_layout = impl_param.output_layout;
auto kernel = primitive->size;
auto stride = primitive->stride;
auto dilation = primitive->dilation.empty() ? ov::Strides(stride.size(), 1)
: primitive->dilation;
ov::CoordinateDiff pads_begin(primitive->pads_begin.begin(), primitive->pads_begin.end());
ov::CoordinateDiff pads_end(primitive->pads_end.begin(), primitive->pads_end.end());
auto auto_pad = primitive->auto_pad;
if (auto_pad == ov::op::PadType::SAME_UPPER || auto_pad == ov::op::PadType::SAME_LOWER) {
pads_begin.clear();
pads_end.clear();
ngraph::try_apply_auto_padding(input_layout.get_partial_shape(),
kernel,
stride,
dilation,
auto_pad,
pads_end,
pads_begin);
}
if (auto_pad == ov::op::PadType::VALID) {
pads_begin = ov::CoordinateDiff(pads_begin.size(), 0);
pads_end = ov::CoordinateDiff(pads_end.size(), 0);
}
auto spatial_rank = output_layout.get_spatial_rank();
kernel.resize(std::max<size_t>(2, kernel.size()), 1);
stride.resize(std::max<size_t>(2, stride.size()), 1);
dilation.resize(std::max<size_t>(2, dilation.size()), 1);
pads_begin.resize(std::max<size_t>(2, pads_begin.size()), 0);
pads_end.resize(std::max<size_t>(2, pads_end.size()), 0);
auto& pp = pool_params;
pp.poolType = cldnn_2_pool_type(primitive->mode);
pp.remainderAction = primitive->rounding_type == ov::op::RoundingType::CEIL ? kernel_selector::pool_remainder::CEIL
: kernel_selector::pool_remainder::FLOOR;
// check if last pooling window goes outside of input size + padding. If so the avg pooling size will be
// adjusted to that, to work properly this calculation must take pad_end into account.
auto dynamic_mode = false;
for (size_t i = 0; i < spatial_rank; i++) {
dynamic_mode |= (((output_layout.spatial(i) - 1) * stride[spatial_rank - i - 1]) + primitive->size[spatial_rank - i - 1]) >
static_cast<size_t>(pads_end[spatial_rank - i - 1] + pads_begin[spatial_rank - i - 1] + input_layout.spatial(i));
}
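// Hypothetical numbers: input X = 5, pads 1/1, kernel 3, stride 3 and CEIL rounding
// give output X = 3, so the last window spans [6, 9) while the padded input only
// covers 5 + 1 + 1 = 7 elements; dynamic_mode then triggers the average-size
// adjustment described above.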
if (primitive->mode == pooling_mode::average && dynamic_mode)
@ -158,95 +186,38 @@ public:
namespace detail {
attach_pooling_impl::attach_pooling_impl() {
std::set<implementation_map<pooling>::key_type> keys;
auto types = { data_types::f16, data_types::f32, data_types::i8, data_types::u8 };
auto formats = { format::bfyx,
format::byxf,
format::yxfb,
format::b_fs_yx_fsv4,
format::b_fs_yx_fsv16,
format::b_fs_yx_fsv32,
format::bs_fs_yx_bsv16_fsv16,
format::bs_fs_yx_bsv16_fsv32,
format::bs_fs_yx_bsv32_fsv16,
format::bs_fs_yx_bsv32_fsv32,
format::bfzyx,
format::b_fs_zyx_fsv16,
format::b_fs_zyx_fsv32,
format::bs_fs_zyx_bsv16_fsv16,
format::bs_fs_zyx_bsv16_fsv32,
format::bs_fs_zyx_bsv32_fsv16,
format::bs_fs_zyx_bsv32_fsv32 };
for (const auto type : types) {
for (const auto format : formats) {
keys.emplace(type, format);
}
}
keys.emplace(data_types::f16, format::fs_b_yx_fsv32);
keys.emplace(data_types::f32, format::fs_b_yx_fsv32);
implementation_map<pooling>::add(impl_types::ocl, pooling_impl::create, keys);
}
} // namespace detail

View File

@ -23,6 +23,7 @@ public:
using parent::parent;
program_node& input() const { return get_dependency(0); }
program_node& argmax() const { return get_dependency(1); }
std::vector<size_t> get_shape_infer_dependencies() const override { return {}; }
};
using pooling_node = typed_program_node<pooling>;
@ -32,6 +33,8 @@ class typed_primitive_inst<pooling> : public typed_primitive_inst_base<pooling>
using parent = typed_primitive_inst_base<pooling>;
public:
template<typename ShapeType>
static std::vector<layout> calc_output_layouts(pooling_node const& /*node*/, const kernel_impl_params& impl_param);
static layout calc_output_layout(pooling_node const& node, kernel_impl_params const& impl_param);
static std::string to_string(pooling_node const& node);

View File

@ -3,10 +3,14 @@
//
#include "pooling_inst.h"
#include "primitive_type_base.h"
#include "sliding_window_utils.hpp"
#include "ngraph/validation_util.hpp"
#include "intel_gpu/runtime/error_handler.hpp"
#include "primitive_type_base.h"
#include "json_object.h"
#include <string>
using namespace ov::intel_gpu;
@ -144,6 +148,110 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_i
return {output_type, input_layout.format, output_size};
}
template<typename ShapeType>
std::vector<layout> pooling_inst::calc_output_layouts(pooling_node const& /*node*/, const kernel_impl_params& impl_param) {
auto desc = impl_param.typed_desc<pooling>();
auto input_layout = impl_param.get_input_layout();
auto input_shape = input_layout.get<ShapeType>();
auto output_dtype = input_layout.data_type;
if (output_dtype == data_types::u8 || output_dtype == data_types::i8) {
if (desc->mode == pooling_mode::average_no_padding || desc->mode == pooling_mode::average) {
output_dtype = data_types::f32;
}
}
if (impl_param.has_fused_primitives()) {
output_dtype = impl_param.get_fused_output_layout().data_type;
// pooling doesn't support i32 data type
// FIXME: Someday delete this, when pooling supports i32 output.
if (desc->mode == pooling_mode::max && output_dtype == data_types::i32) {
output_dtype = data_types::f32;
}
}
ShapeType output_shape = ShapeType::dynamic(input_shape.size());
output_shape[0] = input_shape[0];
output_shape[1] = input_shape[1];
if (input_shape.is_dynamic()) {
return { layout{output_shape, input_layout.data_type, input_layout.format} };
}
if (desc->with_output_size) {
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
"User-defined size of output X",
desc->output_size.spatial[0],
"",
0,
"User-defined size of output layout (spatial X) must be positive (>= 1)");
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
"User-defined size of output Y",
desc->output_size.spatial[1],
"",
0,
"User-defined size of output layout (spatial Y) must be positive (>= 1)");
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
"User-defined size of output Z",
desc->output_size.spatial[2],
"",
0,
"User-defined size of output layout (spatial Z) must be positive (>= 1)");
tensor output_size(input_layout.batch(),
input_layout.feature(),
desc->output_size.spatial[0],
desc->output_size.spatial[1],
desc->output_size.spatial[2]);
return {{output_dtype, input_layout.format, output_size}};
}
auto kernel_size = desc->size;
auto stride = desc->stride;
auto dilation = desc->dilation.empty() ? ov::Strides(stride.size(), 1)
: desc->dilation;
bool ceil_mod = desc->rounding_type == ov::op::RoundingType::CEIL;
auto is_positive_values = [](const std::vector<size_t>& values) {
return !std::any_of(values.begin(), values.end(), [](size_t val) { return val == 0; });
};
OPENVINO_ASSERT(is_positive_values(kernel_size), "Size of pooling window must be positive (>= 1)");
OPENVINO_ASSERT(is_positive_values(stride), "Strides must be positive (>= 1)");
OPENVINO_ASSERT(is_positive_values(dilation), "Dilations must be positive (>= 1)");
ov::CoordinateDiff pads_begin(desc->pads_begin.begin(), desc->pads_begin.end());
ov::CoordinateDiff pads_end(desc->pads_end.begin(), desc->pads_end.end());
auto auto_pad = desc->auto_pad;
if (auto_pad == ov::op::PadType::SAME_UPPER || auto_pad == ov::op::PadType::SAME_LOWER) {
pads_begin.clear();
pads_end.clear();
ngraph::try_apply_auto_padding(input_shape,
kernel_size,
stride,
dilation,
auto_pad,
pads_end,
pads_begin);
}
if (auto_pad == ov::op::PadType::VALID) {
pads_begin = ov::CoordinateDiff(pads_begin.size(), 0);
pads_end = ov::CoordinateDiff(pads_end.size(), 0);
}
size_t spatial_size = input_shape.size() - 2;
for (size_t i = 0; i < spatial_size; ++i) {
int64_t padded_input_dim = input_shape[i + 2].get_length() + pads_begin[i] + pads_end[i];
int64_t kernel_dilated_dim = dilation[i] * (kernel_size[i] - 1) + 1;
int64_t out_dim = ceil_mod ? ceil_div(padded_input_dim - kernel_dilated_dim, stride[i]) + 1 :
(padded_input_dim - kernel_dilated_dim) / stride[i] + 1;
output_shape[i + 2] = out_dim;
}
return { layout{output_shape, output_dtype, input_layout.format} };
}
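// Worked example for the formula above (hypothetical values, FLOOR rounding):
// input spatial dim 32, pads_begin = pads_end = 1, kernel 2, dilation 1, stride 2:
//   padded_input_dim   = 32 + 1 + 1 = 34
//   kernel_dilated_dim = 1 * (2 - 1) + 1 = 2
//   out_dim            = (34 - 2) / 2 + 1 = 17
// With CEIL rounding the division rounds up via ceil_div before adding 1.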
std::string pooling_inst::to_string(pooling_node const& node) {
auto desc = node.get_primitive();
auto strd = desc->stride;

View File

@ -19,27 +19,31 @@ static void CreateAvgPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Av
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
std::shared_ptr<cldnn::pooling> pooling_prim = nullptr;
if (p.use_new_shape_infer()) {
pooling_prim = std::make_shared<cldnn::pooling>(layerName,
inputPrimitives[0],
op->get_exclude_pad() ? cldnn::pooling_mode::average_no_padding
: cldnn::pooling_mode::average,
op->get_kernel(),
op->get_strides(),
op->get_pads_begin(),
op->get_pads_end(),
op->get_auto_pad(),
op->get_rounding_type());
} else {
pooling_prim = std::make_shared<cldnn::pooling>(layerName,
inputPrimitives[0],
op->get_exclude_pad() ? cldnn::pooling_mode::average_no_padding
: cldnn::pooling_mode::average,
op->get_kernel(),
op->get_strides(),
op->get_pads_begin(),
op->get_pads_end(),
tensor_from_dims(op->get_output_shape(0)),
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
}
p.add_primitive(*op, pooling_prim);
}
static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::MaxPool>& op) {
@ -47,27 +51,29 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v1::Ma
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
std::shared_ptr<cldnn::pooling> pooling_prim = nullptr;
if (p.use_new_shape_infer()) {
pooling_prim = std::make_shared<cldnn::pooling>(layerName,
inputPrimitives[0],
cldnn::pooling_mode::max,
op->get_kernel(),
op->get_strides(),
op->get_pads_begin(),
op->get_pads_end(),
op->get_auto_pad(),
op->get_rounding_type());
} else {
pooling_prim = std::make_shared<cldnn::pooling>(layerName,
inputPrimitives[0],
cldnn::pooling_mode::max,
op->get_kernel(),
op->get_strides(),
op->get_pads_begin(),
op->get_pads_end(),
tensor_from_dims(op->get_output_shape(0)),
cldnn::element_type_to_data_type(op->get_output_element_type(0)));
}
p.add_primitive(*op, pooling_prim);
}
static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::MaxPool>& op) {
@ -91,27 +97,16 @@ static void CreateMaxPoolOp(Program& p, const std::shared_ptr<ngraph::op::v8::Ma
p.add_primitive(*op, indices_mutable_prim);
inputPrimitives.push_back(maxpool_mutable_id_w);
auto poolPrim = cldnn::pooling(layerName,
inputPrimitives[0],
inputPrimitives.back(),
op->get_kernel(),
op->get_strides(),
op->get_dilations(),
op->get_pads_begin(),
op->get_pads_end(),
op->get_auto_pad(),
op->get_rounding_type(),
op->get_axis(),
cldnn::element_type_to_data_type(op->get_index_element_type()),
tensor_from_dims(op->get_output_shape(0)),

View File

@ -0,0 +1,90 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/pooling.hpp>
#include "pooling_inst.h"
#include "program_wrapper.h"
using namespace cldnn;
using namespace ::tests;
namespace shape_infer_tests {
struct pooling_test_params {
layout data_layout;
pooling_mode mode;
ov::Shape kernel_size;
ov::Strides stride;
ov::Shape pads_begin;
ov::Shape pads_end;
ov::op::PadType auto_pad;
layout expected_layout;
};
class pooling_si_test : public testing::TestWithParam<pooling_test_params> { };
TEST_P(pooling_si_test, shape_infer) {
auto p = GetParam();
auto& engine = get_test_engine();
auto data_layout_prim = std::make_shared<input_layout>("data", p.data_layout);
auto pooling_prim = std::make_shared<pooling>("output", "data", p.mode, p.kernel_size, p.stride, p.pads_begin, p.pads_end, p.auto_pad);
cldnn::program prog(engine);
auto& data_node = prog.get_or_create(data_layout_prim);
auto& pooling_node = prog.get_or_create(pooling_prim);
program_wrapper::add_connection(prog, data_node, pooling_node);
auto res = pooling_inst::calc_output_layouts<ov::PartialShape>(pooling_node, *pooling_node.get_kernel_impl_params());
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res[0], p.expected_layout);
}
INSTANTIATE_TEST_SUITE_P(smoke, pooling_si_test,
testing::ValuesIn(std::vector<pooling_test_params>{
{
layout{ov::PartialShape{1, 3, 32, 32}, data_types::f32, format::bfyx},
pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT,
layout{ov::PartialShape{1, 3, 17, 17}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{1, 3, 32, 32}, data_types::f32, format::bfyx},
pooling_mode::average, {5, 5}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT,
layout{ov::PartialShape{1, 3, 15, 15}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{1, 3, 32, 32}, data_types::f32, format::bfyx},
pooling_mode::average_no_padding, {5, 5}, {3, 3}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT,
layout{ov::PartialShape{1, 3, 10, 10}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{1, 3, 32, 32}, data_types::f32, format::bfyx},
pooling_mode::average, {5, 5}, {2, 2}, {0, 0}, {1, 1}, ov::op::PadType::SAME_UPPER,
layout{ov::PartialShape{1, 3, 16, 16}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{1, 3, 32, 32}, data_types::f32, format::bfyx},
pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::VALID,
layout{ov::PartialShape{1, 3, 16, 16}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape{1, -1, -1, 32}, data_types::f32, format::bfyx},
pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT,
layout{ov::PartialShape{1, -1, -1, -1}, data_types::f32, format::bfyx}
},
{
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx},
pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT,
layout{ov::PartialShape::dynamic(4), data_types::f32, format::bfyx}
}
}));
} // shape_infer_tests
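As a cross-check, the first case above follows the formula in calc_output_layouts: (32 + 1 + 1 - 2) / 2 + 1 = 17 with the default FLOOR rounding, giving the expected 1x3x17x17 layout; the SAME_UPPER case ignores the supplied pads and yields ceil(32 / 2) = 16.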

View File

@ -825,7 +825,7 @@ TEST(pooling_forward_gpu, offsets_avg_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i2x2x1x1_out
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(pooling("pool_prim", "input_prim", pooling_mode::average, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 2, 2}, 0}));
topology.add(pooling("pool_prim", "input_prim", pooling_mode::average, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 2, 2}, 0}));
network network(engine, topology);
set_values(input_prim, { 1.5f, -0.5f, -1.0f, 0.5f });
@ -886,7 +886,7 @@ TEST(pooling_forward_gpu, offsets_max_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i3x3x1x1_out
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(pooling("pool_prim", "input_prim", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 1, 1}, 0}));
topology.add(pooling("pool_prim", "input_prim", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 1, 1}, 0}));
network network(engine, topology);
@ -957,7 +957,7 @@ TEST(pooling_forward_gpu, offsets_avg_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i2x2x1x1_inp
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder", "input_prim", input_prim->get_layout().with_padding(padding{ {0,0,1,2}, 0 })));
topology.add(pooling("pool_prim", "reorder", pooling_mode::average, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 2, 2}, 0}));
topology.add(pooling("pool_prim", "reorder", pooling_mode::average, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 2, 2}, 0}));
network network(engine, topology);
set_values(input_prim, { 1.5f, -0.5f, -1.0f, 0.5f });
@ -1020,7 +1020,7 @@ TEST(pooling_forward_gpu, offsets_max_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i3x3x1x1_inp
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder", "input_prim", input_prim->get_layout().with_padding(padding{ { 0, 0, 1, 2 }, 0 })));
topology.add(pooling("pool_prim", "reorder", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 1, 1}, 0}));
topology.add(pooling("pool_prim", "reorder", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 1, 1}, 0}));
network network(engine, topology);
@ -1091,7 +1091,7 @@ TEST(pooling_forward_gpu, avg_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i2x2x1x1_inpad2x1_ou
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder", "input_prim", input_prim->get_layout().with_padding(padding{ { 0, 0, 2, 1 }, 0 })));
topology.add(pooling("pool_prim", "reorder", pooling_mode::average, { 2, 2 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, padding{ { 0, 0, 2, 2 }, 0 }));
topology.add(pooling("pool_prim", "reorder", pooling_mode::average, { 2, 2 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{ { 0, 0, 2, 2 }, 0 }));
network network(engine, topology);
set_values(input_prim, {
@ -1159,7 +1159,7 @@ TEST(pooling_forward_gpu, max_yxfb_bfyx_f32_wsiz2x2_wstr2x2_i3x3x1x1_inpad2x1_ou
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder", "input_prim", input_prim->get_layout().with_padding(padding{ { 0, 0, 2, 1 }, 0 })));
topology.add(pooling("pool_prim", "reorder", pooling_mode::max, { 2, 2}, { 2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 1, 1}, 0}));
topology.add(pooling("pool_prim", "reorder", pooling_mode::max, { 2, 2}, { 2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 1, 1}, 0}));
network network(engine, topology);
@ -1717,7 +1717,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x3x3_input_2x2_pool_2x2_stride_2x
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder_input", "input_prim", layout(data_types::f16, format::fs_b_yx_fsv32, input_tensor)));
topology.add(pooling("pool_prim", "reorder_input", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 1, 1}, 0}));
topology.add(pooling("pool_prim", "reorder_input", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 1, 1}, 0}));
topology.add(reorder("reorder_pooling", "pool_prim", layout(data_types::f16, format::bfyx, { 1,1,4,4 }, padding{ { 0, 0, 1, 1 }, 0 })));
network network(engine, topology);
@ -1790,7 +1790,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_max_1x1x5x5_input_2x2_pool_2x2_stride_2x
topology topology;
topology.add(input_layout("input_prim", input_prim->get_layout()));
topology.add(reorder("reorder_input", "input_prim", layout(data_types::f16, format::fs_b_yx_fsv32, input_tensor, padding{ { 0, 0, 2, 1 } , 0 })));
topology.add(pooling("pool_prim", "reorder_input", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, padding{{0, 0, 1, 1}, 0}));
topology.add(pooling("pool_prim", "reorder_input", pooling_mode::max, {2, 2}, {2, 2}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{{0, 0, 1, 1}, 0}));
topology.add(reorder("reorder_pooling", "pool_prim", layout(data_types::f16, format::bfyx, input_tensor, padding{ { 0, 0, 1, 1 }, 0 })));
network network(engine, topology);
@ -1867,7 +1867,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_65x5x6x7_input_3x3_pool_4x4_stride_3
topology golden_topology;
golden_topology.add(input_layout("input", input_prim->get_layout()));
golden_topology.add(reorder("reorder_input", "input", input_prim->get_layout().with_padding(padding{ {0,0,x_in_pad,y_in_pad},0 })));
golden_topology.add(pooling("golden_pooling", "reorder_input", pooling_mode::average, { pool_size, pool_size }, { stride_size, stride_size }, { 0, 0 }, { 0, 0 }, padding{ { 0, 0, x_out_pad, y_out_pad }, 0 }));
golden_topology.add(pooling("golden_pooling", "reorder_input", pooling_mode::average, { pool_size, pool_size }, { stride_size, stride_size }, { 0, 0 }, { 0, 0 }, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{ { 0, 0, x_out_pad, y_out_pad }, 0 }));
network golden_network(engine, golden_topology);
golden_network.set_input_data("input", input_prim);
@ -1884,7 +1884,7 @@ TEST(pooling_forward_gpu, fs_b_yx_fsv32_avg_65x5x6x7_input_3x3_pool_4x4_stride_3
topology golden_topology;
golden_topology.add(input_layout("input", input_prim->get_layout()));
golden_topology.add(reorder("reorder_input", "input", layout(data_types::f16, format::fs_b_yx_fsv32, input_tensor, padding{ {0, 0, x_in_pad, y_in_pad}, 0 })));
golden_topology.add(pooling("fsv32_pooling", "reorder_input", pooling_mode::average, { pool_size, pool_size }, { stride_size, stride_size }, { 0, 0 }, { 0, 0 }, padding{ { 0, 0, x_out_pad, y_out_pad }, 0 }));
golden_topology.add(pooling("fsv32_pooling", "reorder_input", pooling_mode::average, { pool_size, pool_size }, { stride_size, stride_size }, { 0, 0 }, { 0, 0 }, ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, padding{ { 0, 0, x_out_pad, y_out_pad }, 0 }));
golden_topology.add(reorder("reorder_pooling", "fsv32_pooling", layout(data_types::f16, format::bfyx, input_tensor, padding{ { 0,0,x_out_pad,y_out_pad },0 })));
network fsv32_network(engine, golden_topology);
@ -2914,10 +2914,10 @@ public:
all_layer_params.emplace_back(new pooling("pooling", "reorder0", pooling_mode, size, stride));
// Output padding
all_layer_params.emplace_back(new pooling("pooling", "input0", pooling_mode, size, stride, generate_pad(2, 3, size), generate_pad(2, 3, size), { { 0, 0, 1, 5 }, { 0, 0, 19, 4 } }));
all_layer_params.emplace_back(new pooling("pooling", "input0", pooling_mode, size, stride, generate_pad(2, 3, size), generate_pad(2, 3, size), ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, { { 0, 0, 1, 5 }, { 0, 0, 19, 4 } }));
// Input + output padding
all_layer_params.emplace_back(new pooling("pooling", "reorder0", pooling_mode, size, stride, generate_pad(2, 3, size), generate_pad(2, 3, size), { { 0, 0, 2, 1 }, { 0, 0, 3, 4 } }));
all_layer_params.emplace_back(new pooling("pooling", "reorder0", pooling_mode, size, stride, generate_pad(2, 3, size), generate_pad(2, 3, size), ov::op::PadType::EXPLICIT, ov::op::RoundingType::FLOOR, { { 0, 0, 2, 1 }, { 0, 0, 3, 4 } }));
}
}
}

View File

@ -0,0 +1,353 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/single_layer/pooling.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
using namespace ov::test;
namespace GPULayerTestsDefinitions {
using poolLayerGpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecificParams,
InputShape,
ElementType>;
class PoolingLayerGPUTest : public testing::WithParamInterface<poolLayerGpuTestParamsSet>,
virtual public SubgraphBaseTest {
public:
static std::string getTestCaseName(const testing::TestParamInfo<poolLayerGpuTestParamsSet>& obj) {
LayerTestsDefinitions::poolSpecificParams basicParamsSet;
InputShape inputShapes;
ElementType inPrc;
std::tie(basicParamsSet, inputShapes, inPrc) = obj.param;
ngraph::helpers::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
std::vector<size_t> padBegin, padEnd;
ngraph::op::PadType padType;
ngraph::op::RoundingType roundingType;
bool excludePad;
std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;
std::ostringstream results;
results << "IS=(";
results << CommonTestUtils::partialShape2str({inputShapes.first}) << ")_";
results << "TS=";
for (const auto& shape : inputShapes.second) {
results << CommonTestUtils::vec2str(shape) << "_";
}
results << "Prc=" << inPrc << "_";
switch (poolType) {
case ngraph::helpers::PoolingTypes::MAX:
results << "MaxPool_";
break;
case ngraph::helpers::PoolingTypes::AVG:
results << "AvgPool_";
results << "ExcludePad=" << excludePad << "_";
break;
}
results << "K" << CommonTestUtils::vec2str(kernel) << "_";
results << "S" << CommonTestUtils::vec2str(stride) << "_";
results << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
results << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
results << "Rounding=" << roundingType << "_";
results << "AutoPad=" << padType << "_";
return results.str();
}
protected:
void SetUp() override {
targetDevice = CommonTestUtils::DEVICE_GPU;
LayerTestsDefinitions::poolSpecificParams basicParamsSet;
InputShape inputShapes;
ElementType inPrc;
std::tie(basicParamsSet, inputShapes, inPrc) = this->GetParam();
ngraph::helpers::PoolingTypes poolType;
std::vector<size_t> kernel, stride;
std::vector<size_t> padBegin, padEnd;
ngraph::op::PadType padType;
ngraph::op::RoundingType roundingType;
bool excludePad;
std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet;
init_input_shapes({inputShapes});
auto params = ngraph::builder::makeDynamicParams(inPrc, inputDynamicShapes);
std::shared_ptr<ngraph::Node> poolInput = params[0];
std::shared_ptr<ngraph::Node> pooling = ngraph::builder::makePooling(poolInput,
stride,
padBegin,
padEnd,
kernel,
roundingType,
padType,
excludePad,
poolType);
auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr<ngraph::Node> &lastNode) {
ngraph::ResultVector results;
for (size_t i = 0; i < lastNode->get_output_size(); i++)
results.push_back(std::make_shared<ngraph::opset1::Result>(lastNode->output(i)));
return std::make_shared<ngraph::Function>(results, params, "PoolingGPU");
};
function = makeFunction(inPrc, params, pooling);
}
};
TEST_P(PoolingLayerGPUTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
run();
}
namespace {
const std::vector<ElementType> inpOutPrecision = { ElementType::f32 };
const std::vector<InputShape> inputShapes3D = {
{ {}, {{3, 4, 64}} },
{ {}, {{2, 8, 12}} },
{ {}, {{1, 16, 12}} },
{ {}, {{1, 21, 4}} },
{ {}, {{1, 32, 8}} },
{
// dynamic
{-1, -1, -1},
// target
{
{1, 32, 8},
{1, 21, 4},
{2, 8, 12}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}},
// target
{
{3, 4, 64},
{1, 16, 12},
{1, 32, 8}
}
}
};
const std::vector<InputShape> inputShapes4D = {
{ {}, {{3, 4, 64, 64}} },
{ {}, {{2, 8, 8, 12}} },
{ {}, {{1, 16, 16, 12}} },
{ {}, {{1, 21, 8, 4}} },
{ {}, {{1, 32, 8, 8}} },
{
// dynamic
{-1, -1, -1, -1},
// target
{
{1, 32, 8, 8},
{1, 21, 8, 4},
{2, 8, 8, 12}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}, {1, 64}},
// target
{
{3, 4, 64, 64},
{1, 16, 16, 12},
{1, 32, 8, 8}
}
},
{
// dynamic
{{1, 10}, 16, 8, 8},
// target
{
{1, 16, 8, 8},
{2, 16, 8, 8},
}
}
};
const std::vector<InputShape> inputShapes4D_Large = {
{
// dynamic
{-1, -1, -1, -1},
// target
{
{1, 16, 65, 65},
{1, 8, 130, 130},
{1, 16, 65, 65}
}
},
};
const std::vector<InputShape> inputShapes5D = {
{ {}, {{1, 4, 16, 16, 16}} },
{ {}, {{2, 8, 8, 8, 8}} },
{ {}, {{2, 16, 12, 16, 20}} },
{ {}, {{1, 19, 16, 20, 8}} },
{ {}, {{1, 32, 16, 8, 12}} },
{
// dynamic
{-1, -1, -1, -1, -1},
// target
{
{2, 8, 8, 8, 8},
{1, 19, 16, 20, 8},
{1, 4, 16, 16, 16}
}
},
{
// dynamic
{{1, 5}, {4, 32}, {1, 64}, {1, 64}, {1, 25}},
// target
{
{1, 4, 16, 16, 16},
{1, 32, 16, 8, 12},
{3, 16, 4, 8, 3}
}
}
};
/* ============= Pooling (1D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax3D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
};
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_3D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsMax3D),
::testing::ValuesIn(inputShapes3D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_3D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsAvg3D),
::testing::ValuesIn(inputShapes3D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
/* ============= Pooling (2D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }
};
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_4D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsMax4D),
::testing::ValuesIn(inputShapes4D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_4D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsAvg4D),
::testing::ValuesIn(inputShapes4D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_Large = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0},
ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true },
};
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsAvg4D_Large),
::testing::ValuesIn(inputShapes4D_Large),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
/* ============= Pooling (3D) ============= */
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false },
};
const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = {
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true },
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }
};
INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_5D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsMax5D),
::testing::ValuesIn(inputShapes5D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_5D, PoolingLayerGPUTest,
::testing::Combine(
::testing::ValuesIn(paramsAvg5D),
::testing::ValuesIn(inputShapes5D),
::testing::ValuesIn(inpOutPrecision)),
PoolingLayerGPUTest::getTestCaseName);
} // namespace
} // namespace GPULayerTestsDefinitions