Review pooling classes for shape inference aspects (#16114)

* Review adaptive avg pool shape inference

* Review adaptive max pool shape inference

* Review AvgPool and MaxPool

* Minor improvement for StaticShape

* Update ShapeInferBaseWithPadding's infer
to be compatible with interface after rebase

* Fix build issues

* Set default pads before checks

* Fix include openvino headers
Pawel Raasz 2023-03-13 13:48:44 +01:00 committed by GitHub
parent 63338b6e08
commit 72566cde0d
24 changed files with 1545 additions and 512 deletions

View File

@@ -42,6 +42,7 @@ public:
     element::Type get_index_element_type() const {
         return m_index_element_type;
     }
+    void set_index_element_type(const element::Type& type);
 
 protected:
     ov::element::Type m_index_element_type = ov::element::i64;

View File

@@ -58,9 +58,12 @@ public:
     const Shape& get_pads_end() const {
         return m_pads_end;
     }
+    OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.")
     void set_adding_above(const Shape& pads_end) {
         m_pads_end = pads_end;
     }
+    void set_pads_end(Shape pads_end);
+
     /// \return The pad type for pooling.
     PadType get_auto_pad() const {
         return m_auto_pad;
@@ -77,13 +80,6 @@ public:
     }
 
 protected:
-    bool update_auto_padding(const PartialShape& in_shape,
-                             const Strides& filter_dilations,
-                             Shape& new_pads_end,
-                             Shape& new_pads_begin) const;
-    PartialShape infer_output_shape(const Strides& dilations);
-
     Shape m_kernel;
     Strides m_strides;
     Shape m_pads_begin;

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/adaptive_avg_pool.hpp"
#include "pooling_shape_inference_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v8 {
template <class TShape>
std::vector<TShape> shape_infer(const AdaptiveAvgPool* op,
const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
return {pooling::out_shape_infer(op, input_shapes, constant_data)};
}
template <class TShape>
void shape_infer(const AdaptiveAvgPool* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = shape_infer(op, input_shapes, constant_data);
}
} // namespace v8
} // namespace op
} // namespace ov
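
A minimal usage sketch of the new overload (illustrative, not part of the diff; assumes the dev shape-inference headers are on the include path). The second input is a Constant, which get_input_const_data_as_shape is expected to read through the op's input, so the result should match the dynamic_spatial type_prop case further down ({1, 6, 5, 7}):

#include <memory>
#include <vector>
#include "adaptive_avg_pool_shape_inference.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

ov::PartialShape adaptive_avg_pool_infer_sketch() {
    // Data with dynamic spatial dims; target spatial shape supplied as a Constant.
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 6, -1, -1});
    const auto out_spatial = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {5, 7});
    const auto op = std::make_shared<ov::op::v8::AdaptiveAvgPool>(data, out_spatial);

    // input_shapes[1] is the shape of the second input itself ({2}); the spatial
    // values are read from the Constant through the op's input.
    const auto input_shapes = std::vector<ov::PartialShape>{data->get_partial_shape(), ov::PartialShape{2}};
    return ov::op::v8::shape_infer(op.get(), input_shapes).front();  // expected {1, 6, 5, 7}
}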

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/adaptive_max_pool.hpp"
#include "pooling_shape_inference_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v8 {
template <class TShape>
std::vector<TShape> shape_infer(const AdaptiveMaxPool* op,
const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
return {2, pooling::out_shape_infer(op, input_shapes, constant_data)};
}
template <class TShape>
void shape_infer(const AdaptiveMaxPool* op,
const std::vector<TShape>& input_shapes,
std::vector<TShape>& output_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
output_shapes = shape_infer(op, input_shapes, constant_data);
}
} // namespace v8
} // namespace op
} // namespace ov
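
One subtlety in the overload above: `{2, pooling::out_shape_infer(...)}` uses std::vector's fill constructor, so the values output (port 0) and the indices output (port 1) both receive the same inferred shape. A sketch of the resulting contract (illustrative, not part of the diff):

#include <memory>
#include <vector>
#include "adaptive_max_pool_shape_inference.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

std::vector<ov::PartialShape> adaptive_max_pool_infer_sketch() {
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 6, 3, 2});
    const auto out_spatial = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {5, 7});
    const auto op = std::make_shared<ov::op::v8::AdaptiveMaxPool>(data, out_spatial, ov::element::i32);

    const auto shapes = ov::op::v8::shape_infer(
        op.get(),
        std::vector<ov::PartialShape>{data->get_partial_shape(), ov::PartialShape{2}});
    // shapes.size() == 2 and shapes[0] == shapes[1] == {2, 6, 5, 7}.
    return shapes;
}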

View File

@@ -0,0 +1,59 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "dimension_util.hpp"
#include "openvino/op/avg_pool.hpp"
#include "pooling_shape_inference_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace pooling {
template <>
inline void valid_dilated_kernel_with_padding(const v1::AvgPool* op,
const size_t kernel,
const size_t pad_begin,
const size_t pad_end,
const size_t axis) {
NODE_VALIDATION_CHECK(op,
!op->get_exclude_pad() || ((kernel > pad_begin) && (kernel > pad_end)),
"Kernel after dilation is sometimes entirely in the padding area for axis ",
axis,
" (dilated kernel dimension: ",
kernel,
", padding below dimension: ",
pad_begin,
", padding above dimension: ",
pad_end,
") and this is not ",
"allowed.");
}
} // namespace pooling
namespace v1 {
template <class TShape>
std::vector<TShape> shape_infer(const AvgPool* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& data_shape = input_shapes[0];
const auto dilations = Strides(op->get_kernel().size(), 1);
pooling::update_and_validate_attributes(const_cast<AvgPool*>(op), data_shape, dilations);
auto output_shape = pooling::out_shape_infer(op, data_shape, dilations);
return {output_shape};
}
template <class TShape>
void shape_infer(const AvgPool* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v1
} // namespace op
} // namespace ov
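
A sketch of how the new overload composes with the public v1::AvgPool constructor (illustrative, not part of the diff). With kernel {2}, stride {1}, and no padding, a 32-wide axis gives floor((32 - 2) / 1) + 1 = 31:

#include <memory>
#include <vector>
#include "avg_pool_shape_inference.hpp"
#include "openvino/op/parameter.hpp"

ov::PartialShape avg_pool_infer_sketch() {
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 32});
    const auto op = std::make_shared<ov::op::v1::AvgPool>(data,
                                                          ov::Strides{1},  // strides
                                                          ov::Shape{0},    // pads_begin
                                                          ov::Shape{0},    // pads_end
                                                          ov::Shape{2},    // kernel
                                                          true,            // exclude_pad
                                                          ov::op::RoundingType::FLOOR);
    // floor((32 + 0 + 0 - 2) / 1) + 1 = 31 -> {1, 3, 31}
    return ov::op::v1::shape_infer(op.get(), std::vector<ov::PartialShape>{data->get_partial_shape()}).front();
}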

View File

@@ -0,0 +1,83 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cstdint>
#include "openvino/util/common_util.hpp"
namespace ov {
namespace util {
namespace dim {
constexpr auto inf_bound = -1; //!< Infinite bound value for dimension.
/**
* @brief Calculate dilated dimension value.
*
* @param dim Dimension size value.
* @param dilation Dilation value
* @return Dilated dimension value.
*/
template <class T>
constexpr auto dilated(const T dim, const T dilation) -> T {
return (dim < 1) ? inf_bound : dilation * (dim - 1) + 1;
}
/**
 * @brief Calculate padded dimension size as dimension size + total padding.
 *
 * @param dim      Dimension size value.
 * @param pad_num  Total number of padding elements to add.
 * @return Padded dimension value or infinite bound.
 */
constexpr auto padded(const int64_t dim, const int64_t pad_num) -> int64_t {
return ((dim == inf_bound) || (dim + pad_num < 0)) ? inf_bound : dim + pad_num;
}
/**
 * @brief Divide dimension using ceil rounding.
 *
 * @tparam TDim  Dimension type.
 * @tparam T     Dimension length value type.
 *
 * @param dim      Input dimension.
 * @param divisor  Value to divide the dimension by.
 * @return Divided dimension with bounds rounded up.
 */
template <class TDim, class T = typename TDim::value_type>
auto ceil_div(const TDim& dim, const T divisor) -> TDim {
if (dim.is_static()) {
return {util::ceil_div<T>(dim.get_length(), divisor)};
} else if (dim.get_max_length() == static_cast<T>(dim::inf_bound)) {
return {dim};
} else {
return {util::ceil_div<T>(dim.get_min_length(), divisor), util::ceil_div<T>(dim.get_max_length(), divisor)};
}
}
/**
 * @brief Divide dimension using floor rounding.
 *
 * @tparam TDim  Dimension type.
 * @tparam T     Dimension length value type.
 *
 * @param dim      Input dimension.
 * @param divisor  Value to divide the dimension by.
 * @return Divided dimension with bounds rounded down.
 */
template <class TDim, class T = typename TDim::value_type>
auto floor_div(const TDim& dim, const T divisor) -> TDim {
if (dim.is_static()) {
return {dim.get_length() / divisor};
} else if (dim.get_max_length() == static_cast<T>(dim::inf_bound)) {
return {dim};
} else {
return {dim.get_min_length() / divisor, dim.get_max_length() / divisor};
}
}
} // namespace dim
} // namespace util
} // namespace ov
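
Worked values for the helpers above (an illustrative sketch, not part of the diff; ov::Dimension is the usual TDim and its value_type is int64_t):

#include <cstdint>
#include "dimension_util.hpp"
#include "openvino/core/dimension.hpp"

static_assert(ov::util::dim::dilated(3, 2) == 5, "2 * (3 - 1) + 1");
static_assert(ov::util::dim::padded(5, 2) == 7, "dim + total padding");
static_assert(ov::util::dim::padded(-1, 2) == -1, "infinite bound stays infinite");

inline void dim_div_examples() {
    const ov::Dimension dim{3, 10};  // interval dimension [3, 10]
    const int64_t divisor = 2;
    const auto up = ov::util::dim::ceil_div(dim, divisor);     // -> [2, 5]
    const auto down = ov::util::dim::floor_div(dim, divisor);  // -> [1, 5]
    (void)up;
    (void)down;
}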

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "dimension_util.hpp"
#include "openvino/op/max_pool.hpp"
#include "pooling_shape_inference_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v1 {
template <class TShape>
std::vector<TShape> shape_infer(const MaxPool* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& data_shape = input_shapes[0];
const auto dilations = Strides(op->get_kernel().size(), 1);
pooling::update_and_validate_attributes(const_cast<MaxPool*>(op), data_shape, dilations);
return {pooling::out_shape_infer(op, data_shape, dilations)};
}
template <class TShape>
void shape_infer(const MaxPool* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v1
namespace v8 {
template <class TShape>
std::vector<TShape> shape_infer(const MaxPool* op, const std::vector<TShape>& input_shapes) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 1);
const auto& data_shape = input_shapes[0];
auto dilations = op->get_dilations();
if (dilations.empty()) {
dilations.resize(op->get_kernel().size(), 1);
}
pooling::update_and_validate_attributes(const_cast<MaxPool*>(op), data_shape, dilations);
auto output_shape = pooling::out_shape_infer(op, data_shape, dilations);
return {2, output_shape};
}
template <class TShape>
void shape_infer(const MaxPool* op, const std::vector<TShape>& input_shapes, std::vector<TShape>& output_shapes) {
output_shapes = shape_infer(op, input_shapes);
}
} // namespace v8
} // namespace op
} // namespace ov
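
An illustrative sketch of the v8 overload with a non-trivial dilation (constructor signature from the public API, not part of the diff): kernel {2} dilated by 2 covers 2*(2-1)+1 = 3 elements, so a 32-wide axis yields floor((32 - 3) / 1) + 1 = 30:

#include <memory>
#include <vector>
#include "max_pool_shape_inference.hpp"
#include "openvino/op/parameter.hpp"

ov::PartialShape max_pool_v8_infer_sketch() {
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 32});
    const auto op = std::make_shared<ov::op::v8::MaxPool>(data,
                                                          ov::Strides{1},  // strides
                                                          ov::Strides{2},  // dilations
                                                          ov::Shape{0},    // pads_begin
                                                          ov::Shape{0},    // pads_end
                                                          ov::Shape{2});   // kernel
    // Both output ports (values and indices) share this shape: {1, 3, 30}.
    return ov::op::v8::shape_infer(op.get(), std::vector<ov::PartialShape>{data->get_partial_shape()}).front();
}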

View File

@@ -0,0 +1,264 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "dimension_util.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace pooling {
constexpr size_t spatial_dim_offset = 2;
/**
 * @brief Calculate the padding required for one dimension by the kernel properties.
 *
 * Returns a pair of padding values: the left padding is the total required padding divided by 2,
 * and the right padding is the total required padding minus the left padding.
 *
 * @param dim          Input dimension to calculate padding for.
 * @param kernel_size  Kernel size for the input dimension.
 * @param dilation     Kernel dilation.
 * @param stride       Kernel stride.
 * @return Pair of left, right padding values for the input dimension.
 */
template <class TDim, class T = typename TDim::value_type>
inline std::pair<T, T> dim_padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) {
if (dim.is_static()) {
const auto dim_size = static_cast<int64_t>(dim.get_length());
const auto dilated_kernel = ov::util::dim::dilated(kernel_size, dilation);
const int64_t tmp = (dim_size + stride - 1) / stride;
const auto padding = std::max<int64_t>(0, (tmp - 1) * stride + dilated_kernel - dim_size);
const auto left_padding = padding / 2;
return {left_padding, padding - left_padding};
} else {
// If input dimension is infinite or interval the padding will be set to 0
// as operator cannot store paddings for both bounds.
return {0, 0};
}
}
template <class TOp, class TShape>
void update_and_validate_attributes(TOp* op, const TShape& data_shape, const Strides& dilations) {
const auto& data_rank = data_shape.rank();
NODE_VALIDATION_CHECK(op,
is_rank_compatible_any_of(data_rank, {3, 4, 5}),
"Expected a 3D, 4D or 5D tensor for the input. Got: ",
data_shape);
const auto& kernel = op->get_kernel();
const auto& auto_pad = op->get_auto_pad();
const auto num_spatial = kernel.size();
const auto& strides = op->get_strides();
if (auto_pad == PadType::VALID || op->get_pads_begin().empty()) {
op->set_pads_begin(Shape(num_spatial, 0));
}
if (auto_pad == PadType::VALID || op->get_pads_end().empty()) {
op->set_pads_end(Shape(num_spatial, 0));
}
NODE_VALIDATION_CHECK(op,
op->get_pads_begin().size() == num_spatial,
"Expected pads_begin size to be equal to input size - 2. Got: ",
op->get_pads_begin().size());
NODE_VALIDATION_CHECK(op,
op->get_pads_end().size() == num_spatial,
"Expected pads_end size to be equal to input size - 2. Got: ",
op->get_pads_end().size());
NODE_VALIDATION_CHECK(op,
strides.size() == num_spatial,
"Expected strides size to be equal to input size - 2. Got: ",
strides.size());
NODE_VALIDATION_CHECK(op,
dilations.size() == num_spatial,
"Expected dilations size to be equal to kernel size. Got: ",
dilations.size());
if (data_rank.is_static()) {
NODE_VALIDATION_CHECK(op,
num_spatial == (data_shape.size() - spatial_dim_offset),
"Expected kernel size to be equal to input size - 2. Got: ",
num_spatial);
if (auto_pad == PadType::SAME_UPPER || auto_pad == PadType::SAME_LOWER) {
Shape pads_begin, pads_end;
pads_begin.reserve(num_spatial);
pads_end.reserve(num_spatial);
auto data_dim = data_shape.cbegin() + spatial_dim_offset;
auto pad_begin_ins = std::back_inserter(pads_begin);
auto pad_end_ins = std::back_inserter(pads_end);
auto& pad_left = auto_pad == PadType::SAME_UPPER ? pad_begin_ins : pad_end_ins;
auto& pad_right = auto_pad == PadType::SAME_UPPER ? pad_end_ins : pad_begin_ins;
for (size_t i = 0; i < num_spatial; ++i, ++pad_left, ++pad_right, ++data_dim) {
std::tie(*pad_left, *pad_right) = dim_padding(*data_dim, kernel[i], dilations[i], strides[i]);
}
op->set_pads_begin(pads_begin);
op->set_pads_end(std::move(pads_end));
}
}
constexpr auto is_zero = cmp::Equal<size_t>(0);
NODE_VALIDATION_CHECK(op,
std::none_of(strides.cbegin(), strides.cend(), is_zero),
"Strides has zero dimension(s). ",
strides);
NODE_VALIDATION_CHECK(op,
std::none_of(dilations.cbegin(), dilations.cend(), is_zero),
"Kernel dilations has zero dimension(s). ",
dilations);
}
template <class TOp, class TDim>
void valid_dilated_kernel_with_dim(const TOp* op, const size_t kernel, const TDim& dim, const size_t axis) {
NODE_VALIDATION_CHECK(op,
kernel > 0,
"Kernel after dilation has dimension less than 1 (dim: ",
kernel,
") at axis ",
axis,
".");
NODE_VALIDATION_CHECK(op,
cmp::le(kernel, dim.get_length()),
"Kernel after dilation has size (dim: ",
kernel,
") larger than the data shape after padding (dim: ",
dim,
") at axis ",
axis,
".");
}
template <class TOp>
void valid_dilated_kernel_with_padding(const TOp* op,
const size_t kernel,
const size_t pad_begin,
const size_t pad_end,
const size_t axis) {}
template <class TOp, class TShape>
TShape spatial_shape_infer(const TOp* op, const TShape& data_shape, const Strides& dilations) {
using namespace ov::util;
const auto spatial_num = data_shape.size() - spatial_dim_offset;
const auto is_ceil_mode = op->get_rounding_type() == RoundingType::CEIL;
const auto is_auto_pad = (op->get_auto_pad() == PadType::SAME_UPPER) || (op->get_auto_pad() == PadType::SAME_LOWER);
using TDim = typename TShape::value_type;
const auto& dim_divide = is_ceil_mode ? dim::ceil_div<TDim> : dim::floor_div<TDim>;
TShape out_shape;
out_shape.reserve(spatial_num);
auto data_dim = data_shape.cbegin() + spatial_dim_offset;
const auto& pads_begin = op->get_pads_begin();
const auto& pads_end = op->get_pads_end();
const auto& kernel = op->get_kernel();
const auto& stride = op->get_strides();
for (size_t i = 0; i < spatial_num; ++i, ++data_dim) {
if (data_dim->is_static() || !is_auto_pad) {
auto dim = *data_dim + (pads_begin[i] + pads_end[i]);
const auto kernel_dilated = dim::dilated(kernel[i], dilations[i]);
if (data_dim->is_static()) {
valid_dilated_kernel_with_dim(op, kernel_dilated, dim, i);
valid_dilated_kernel_with_padding(op, kernel_dilated, pads_begin[i], pads_end[i], i);
}
dim = dim - kernel_dilated;
dim = dim_divide(dim, stride[i]);
dim += 1;
out_shape.push_back(std::move(dim));
} else {
// If dimension is interval and is auto pad then result is dynamic shape as padding values are not correct.
// Operator cannot keep separate auto padding values for upper, lower bounds.
out_shape.emplace_back(dim::inf_bound);
}
}
return out_shape;
}
/**
 * @brief Shape inference helper for pooling operators such as MaxPool and AvgPool.
 */
template <class TOp, class TShape>
TShape out_shape_infer(const TOp* op, const TShape& data_shape, const Strides& dilations) {
TShape out_shape;
if (data_shape.rank().is_static()) {
const auto& batch_size = data_shape[0];
const auto& channel_count = data_shape[1];
NODE_VALIDATION_CHECK(op, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero.");
NODE_VALIDATION_CHECK(op,
channel_count.is_dynamic() || channel_count.get_length() > 0,
"Channel count is zero.");
out_shape = spatial_shape_infer(op, data_shape, dilations);
out_shape.insert(out_shape.begin(), data_shape.begin(), data_shape.begin() + spatial_dim_offset);
} else {
out_shape.insert(out_shape.begin(), spatial_dim_offset + op->get_kernel().size(), Dimension::dynamic());
}
return out_shape;
}
/**
* @brief Shape inference helper used for adaptive pooling operators.
*/
template <class TShape,
class TOp,
typename std::enable_if<std::is_same<TOp, v8::AdaptiveAvgPool>::value ||
std::is_same<TOp, v8::AdaptiveMaxPool>::value>::type* = nullptr>
TShape out_shape_infer(const TOp* op,
const std::vector<TShape>& input_shapes,
const std::map<size_t, HostTensorPtr>& constant_data = {}) {
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2);
const auto& data_shape = input_shapes[0];
const auto& out_spatial_shape = input_shapes[1];
const auto& data_rank = data_shape.rank();
NODE_VALIDATION_CHECK(op,
is_rank_compatible_any_of(data_rank, {3, 4, 5}),
"Expected a 3D, 4D or 5D tensor for the input. Got: ",
data_shape);
TShape output_shape;
if (data_rank.is_static()) {
auto num_of_spatial_dims = data_shape.size() - spatial_dim_offset;
NODE_VALIDATION_CHECK(
op,
out_spatial_shape.rank().is_dynamic() || out_spatial_shape[0].compatible(num_of_spatial_dims),
"Output shape for spatial dimension not compatible with data shape.");
output_shape.reserve(data_shape.size());
std::copy_n(data_shape.begin(), spatial_dim_offset, std::back_inserter(output_shape));
if (const auto spatial_dims = get_input_const_data_as_shape<TShape>(op, 1, constant_data)) {
NODE_VALIDATION_CHECK(op,
num_of_spatial_dims == spatial_dims->size(),
"Number of spatial dimensions is not compatible with input data rank");
output_shape.insert(output_shape.end(), spatial_dims->begin(), spatial_dims->end());
} else {
output_shape.insert(output_shape.end(), num_of_spatial_dims, ov::util::dim::inf_bound);
}
} else {
output_shape = PartialShape::dynamic();
}
return output_shape;
}
} // namespace pooling
} // namespace op
} // namespace ov
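
To make the SAME_UPPER/SAME_LOWER arithmetic in dim_padding concrete, an illustrative sketch (not part of the diff): for dim = 10, kernel = 3, dilation = 1, stride = 2, tmp = ceil(10 / 2) = 5 and total padding = max(0, (5 - 1) * 2 + 3 - 10) = 1, split into {left = 0, right = 1}:

#include <cstdint>
#include <utility>
#include "openvino/core/dimension.hpp"
#include "pooling_shape_inference_util.hpp"

std::pair<int64_t, int64_t> dim_padding_sketch() {
    // Returns {0, 1}; for SAME_UPPER the extra element lands in pads_end,
    // for SAME_LOWER it lands in pads_begin (see the inserter swap above).
    return ov::op::pooling::dim_padding(ov::Dimension(10), 3, 1, 2);
}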

View File

@@ -2,15 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/op/adaptive_avg_pool.hpp"
+#include "openvino/op/adaptive_avg_pool.hpp"
 
+#include "adaptive_avg_pool_shape_inference.hpp"
 #include "itt.hpp"
-#include "ngraph/attribute_visitor.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/validation_util.hpp"
 
 using namespace std;
-using namespace ngraph;
+
+namespace ov {
 
 op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output<Node>& data, const Output<Node>& output_shape)
     : Op({data, output_shape}) {
@@ -20,33 +19,7 @@ op::v8::AdaptiveAvgPool::AdaptiveAvgPool(const Output<Node>& data, const Output<
 void op::v8::AdaptiveAvgPool::validate_and_infer_types() {
     OV_OP_SCOPE(v8_AdaptiveAvgPool_validate_and_infer_types);
-    const ov::PartialShape& data_shape = get_input_partial_shape(0);
-
-    NODE_VALIDATION_CHECK(
-        this,
-        data_shape.rank().compatible(3) || data_shape.rank().compatible(4) || data_shape.rank().compatible(5),
-        "Expected a 3D, 4D or 5D tensor for the input. Got: ",
-        data_shape);
-
-    auto output_shape = ov::PartialShape::dynamic(data_shape.rank());
-    if (data_shape.rank().is_static()) {
-        if (data_shape[0].is_static()) {
-            output_shape[0] = data_shape[0];  // batch size
-        }
-        if (data_shape[1].is_static()) {
-            output_shape[1] = data_shape[1];  // channel size
-        }
-        if (const auto& const_output_shape = get_constant_from_source(input_value(1))) {
-            auto output_spatial_shape = const_output_shape->cast_vector<int64_t>();
-            NODE_VALIDATION_CHECK(this,
-                                  (size_t)data_shape.rank().get_length() == 2 + output_spatial_shape.size(),
-                                  "Output shape is not compatible with input data rank");
-            int i = 2;
-            for (auto& dim : output_spatial_shape) {
-                output_shape[i++] = dim;
-            }
-        }
-    }
+    const auto output_shape = shape_infer(this, get_node_input_partial_shapes(*this)).front();
     set_output_type(0, get_input_element_type(0), output_shape);
 }
@@ -55,3 +28,5 @@ shared_ptr<Node> op::v8::AdaptiveAvgPool::clone_with_new_inputs(const OutputVect
     check_new_args_count(this, new_args);
     return make_shared<v8::AdaptiveAvgPool>(new_args.at(0), new_args.at(1));
 }
+
+}  // namespace ov

View File

@@ -2,19 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/op/adaptive_max_pool.hpp"
+#include "openvino/op/adaptive_max_pool.hpp"
 
+#include "adaptive_max_pool_shape_inference.hpp"
 #include "itt.hpp"
-#include "ngraph/attribute_visitor.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/validation_util.hpp"
 
 using namespace std;
-using namespace ngraph;
+
+namespace ov {
 
 op::v8::AdaptiveMaxPool::AdaptiveMaxPool(const Output<Node>& data,
                                          const Output<Node>& output_shape,
-                                         const ngraph::element::Type& index_element_type)
+                                         const element::Type& index_element_type)
     : Op({data, output_shape}),
       m_index_element_type{index_element_type} {
     constructor_validate_and_infer_types();
@@ -33,35 +31,10 @@ void op::v8::AdaptiveMaxPool::validate_and_infer_types() {
                           m_index_element_type == element::i64 || m_index_element_type == element::i32,
                           "Index element type must be i32 or i64");
-    const ov::PartialShape& data_shape = get_input_partial_shape(0);
-
-    NODE_VALIDATION_CHECK(
-        this,
-        data_shape.rank().compatible(3) || data_shape.rank().compatible(4) || data_shape.rank().compatible(5),
-        "Expected a 3D, 4D or 5D tensor for the input. Got: ",
-        data_shape);
-
-    auto output_shape = ov::PartialShape::dynamic(data_shape.rank());
-    if (data_shape.rank().is_static()) {
-        if (data_shape[0].is_static()) {
-            output_shape[0] = data_shape[0];  // batch size
-        }
-        if (data_shape[1].is_static()) {
-            output_shape[1] = data_shape[1];  // channel size
-        }
-        if (const auto& const_output_shape = get_constant_from_source(input_value(1))) {
-            auto output_spatial_shape = const_output_shape->cast_vector<int64_t>();
-            NODE_VALIDATION_CHECK(this,
-                                  (size_t)data_shape.rank().get_length() == 2 + output_spatial_shape.size(),
-                                  "Output shape is not compatible with input data rank");
-            int i = 2;
-            for (auto& dim : output_spatial_shape) {
-                output_shape[i++] = dim;
-            }
-        }
-    }
-    set_output_type(0, get_input_element_type(0), output_shape);
-    set_output_type(1, m_index_element_type, output_shape);
+    const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
+    set_output_type(0, get_input_element_type(0), output_shapes[0]);
+    set_output_type(1, m_index_element_type, output_shapes[1]);
 }
 
 shared_ptr<Node> op::v8::AdaptiveMaxPool::clone_with_new_inputs(const OutputVector& new_args) const {
@@ -69,3 +42,8 @@ shared_ptr<Node> op::v8::AdaptiveMaxPool::clone_with_new_inputs(const OutputVect
     check_new_args_count(this, new_args);
     return make_shared<v8::AdaptiveMaxPool>(new_args.at(0), new_args.at(1), m_index_element_type);
 }
+
+void op::v8::AdaptiveMaxPool::set_index_element_type(const element::Type& type) {
+    m_index_element_type = type;
+}
+
+}  // namespace ov

View File

@@ -4,6 +4,7 @@
 #include "ngraph/op/avg_pool.hpp"
 
+#include "avg_pool_shape_inference.hpp"
 #include "itt.hpp"
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/graph_util.hpp"
@@ -45,91 +46,9 @@ bool ov::op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) {
 void ov::op::v1::AvgPool::validate_and_infer_types() {
     OV_OP_SCOPE(v1_AvgPool_validate_and_infer_types);
-    if (0 == m_strides.size()) {
-        m_strides = Strides(m_kernel.size(), 1);
-    }
-    if (0 == m_pads_begin.size()) {
-        m_pads_begin = Shape(m_kernel.size(), 0);
-    }
-    if (0 == m_pads_end.size()) {
-        m_pads_end = Shape(m_kernel.size(), 0);
-    }
-
-    const ov::PartialShape& arg_shape = get_input_partial_shape(0);
-
-    NODE_VALIDATION_CHECK(
-        this,
-        arg_shape.rank().compatible(3) || arg_shape.rank().compatible(4) || arg_shape.rank().compatible(5),
-        "Expected a 3D, 4D or 5D tensor for the input. Got: ",
-        arg_shape);
-
-    if (arg_shape.rank().is_static()) {
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_end.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_end size to be equal to input size - 2. Got: ",
-                              m_pads_end.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_begin.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_begin size to be equal to input size - 2. Got: ",
-                              m_pads_begin.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_kernel.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected kernel size to be equal to input size - 2. Got: ",
-                              m_kernel.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_strides.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected strides size to be equal to input size - 2. Got: ",
-                              m_kernel.size());
-    }
-
-    auto output_shape = ov::PartialShape::dynamic();
-    if (arg_shape.rank().is_static() && arg_shape.rank().get_max_length() > 0) {
-        output_shape = std::vector<Dimension>(arg_shape.rank().get_max_length(), Dimension::dynamic());
-        if (arg_shape[0].is_static()) {
-            output_shape[0] = arg_shape[0];  // batch size
-        }
-        if (arg_shape[1].is_static()) {
-            output_shape[1] = arg_shape[1];  // channel size
-        }
-    }
-
-    bool update_auto_padding_succeed = true;
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER) {
-        CoordinateDiff pads_end;
-        CoordinateDiff pads_begin;
-        update_auto_padding_succeed = ngraph::try_apply_auto_padding(arg_shape,
-                                                                     m_kernel,
-                                                                     m_strides,
-                                                                     Strides(m_kernel.size(), 1),  // No dilation
-                                                                     m_auto_pad,
-                                                                     pads_end,
-                                                                     pads_begin);
-        m_pads_end = Shape(pads_end.begin(), pads_end.end());
-        m_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
-    }
-    if (m_auto_pad == PadType::VALID) {
-        m_pads_end = Shape(m_pads_end.size(), 0);
-        m_pads_begin = Shape(m_pads_begin.size(), 0);
-    }
-    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
-    // now still take Shape (no negative padding).
-    CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
-    CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
-
-    set_output_type(0,
-                    get_input_element_type(0),
-                    update_auto_padding_succeed
-                        ? ngraph::infer_batched_pooling_forward(this,
-                                                                arg_shape,
-                                                                pads_begin,
-                                                                pads_end,
-                                                                m_kernel,
-                                                                m_strides,
-                                                                !m_exclude_pad,
-                                                                m_rounding_type == op::RoundingType::CEIL,
-                                                                Strides{})  // no dilation of the window
-                        : output_shape);
+    const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
+    set_output_type(0, get_input_element_type(0), output_shapes.front());
 }
 
 const ov::Shape& ov::op::v1::AvgPool::get_kernel() const {

View File

@@ -5,6 +5,7 @@
 #include "ngraph/op/max_pool.hpp"
 
 #include "itt.hpp"
+#include "max_pool_shape_inference.hpp"
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/constant.hpp"
@@ -40,11 +41,8 @@ bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor) {
 void op::v1::MaxPool::validate_and_infer_types() {
     OV_OP_SCOPE(v1_MaxPool_validate_and_infer_types);
-    MaxPoolBase::validate_and_infer_types();
-
-    const ov::PartialShape output_shape = infer_output_shape(Strides{});  // no dilations of the filter window
-
-    set_output_type(0, get_input_element_type(0), output_shape);
+    const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
+    set_output_type(0, get_input_element_type(0), output_shapes.front());
 }
 
 shared_ptr<Node> op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const {
@@ -109,21 +107,8 @@ bool evaluate_maxpool(const HostTensorPtr& arg,
 }  // namespace maxpool
 
 bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
-    auto arg_shape = inputs[0]->get_partial_shape();
-    auto pads_begin_s = get_pads_begin();
-    auto pads_end_s = get_pads_end();
-    update_auto_padding(arg_shape, Strides(m_kernel.size(), 1), pads_end_s, pads_begin_s);
-    CoordinateDiff pads_begin(pads_begin_s.begin(), pads_begin_s.end());
-    CoordinateDiff pads_end(pads_end_s.begin(), pads_end_s.end());
-    auto out_shape = infer_batched_pooling_forward(this,
-                                                   arg_shape,
-                                                   pads_begin,
-                                                   pads_end,
-                                                   get_kernel(),
-                                                   get_strides(),
-                                                   true,
-                                                   get_rounding_type() == op::RoundingType::CEIL,
-                                                   Strides{});  // no dilation of the window
+    const auto input_shapes = std::vector<PartialShape>{inputs[0]->get_partial_shape()};
+    auto out_shape = shape_infer(this, input_shapes).front();
 
     return maxpool::evaluate_maxpool(inputs[0],
                                      outputs[0],
@@ -286,17 +271,14 @@ bool ngraph::op::v8::MaxPool::visit_attributes(AttributeVisitor& visitor) {
 void op::v8::MaxPool::validate_and_infer_types() {
     OV_OP_SCOPE(v8_MaxPool_validate_and_infer_types);
-    MaxPoolBase::validate_and_infer_types();
 
     const auto input_shape = get_input_partial_shape(0);
     if (input_shape.rank().is_static()) {
         m_axis = ngraph::normalize_axis(this, m_axis, input_shape.rank());
     }
 
-    const ov::PartialShape output_shape = infer_output_shape(m_dilations);
-
-    set_output_type(0, get_input_element_type(0), output_shape);
-    set_output_type(1, m_index_element_type, output_shape);
+    const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this));
+    set_output_type(0, get_input_element_type(0), output_shapes[0]);
+    set_output_type(1, m_index_element_type, output_shapes[1]);
 }
 
 shared_ptr<Node> op::v8::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const {
@@ -335,21 +317,8 @@ bool op::v8::MaxPool::has_evaluate() const {
 bool op::v8::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
     OV_OP_SCOPE(v8_MaxPool_evaluate);
-    const auto arg_shape = inputs[0]->get_partial_shape();
-    auto pads_begin_s = get_pads_begin();
-    auto pads_end_s = get_pads_end();
-    update_auto_padding(arg_shape, get_dilations(), pads_end_s, pads_begin_s);
-    CoordinateDiff pads_begin(pads_begin_s.begin(), pads_begin_s.end());
-    CoordinateDiff pads_end(pads_end_s.begin(), pads_end_s.end());
-    auto out_shape = infer_batched_pooling_forward(this,
-                                                   arg_shape,
-                                                   pads_begin,
-                                                   pads_end,
-                                                   get_kernel(),
-                                                   get_strides(),
-                                                   true,
-                                                   get_rounding_type() == op::RoundingType::CEIL,
-                                                   get_dilations());
+    const auto input_shapes = std::vector<PartialShape>{inputs[0]->get_partial_shape()};
+    auto out_shape = shape_infer(this, input_shapes).front();
 
     return maxpool_v8::evaluate_maxpool(inputs[0],
                                         outputs[0],

View File

@@ -2,14 +2,9 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/op/util/max_pool_base.hpp"
-
-#include <ngraph/validation_util.hpp>
+#include "openvino/op/util/max_pool_base.hpp"
 
 #include "itt.hpp"
-#include "ngraph/shape.hpp"
-
-using namespace std;
 
 ov::op::util::MaxPoolBase::MaxPoolBase(const Output<Node>& arg,
                                        const Strides& strides,
@@ -31,107 +26,19 @@ ov::op::util::MaxPoolBase::MaxPoolBase(const Output<Node>& arg,
 void ov::op::util::MaxPoolBase::validate_and_infer_types() {
     OV_OP_SCOPE(util_MaxPoolBase_validate_and_infer_types);
 
-    if (0 == m_strides.size()) {
-        m_strides = Strides(m_kernel.size(), 1);
+    if (m_strides.empty()) {
+        m_strides.resize(m_kernel.size(), 1);
     }
 
-    if (0 == m_pads_begin.size()) {
-        m_pads_begin = ov::Shape(m_kernel.size(), 0);
+    if (m_pads_begin.empty()) {
+        m_pads_begin.resize(m_kernel.size(), 0);
     }
 
-    if (0 == m_pads_end.size()) {
-        m_pads_end = ov::Shape(m_kernel.size(), 0);
-    }
-
-    const PartialShape& arg_shape = get_input_partial_shape(0);
-
-    NODE_VALIDATION_CHECK(
-        this,
-        arg_shape.rank().compatible(3) || arg_shape.rank().compatible(4) || arg_shape.rank().compatible(5),
-        "Expected a 3D, 4D or 5D tensor for the input. Got: ",
-        arg_shape);
-
-    if (arg_shape.rank().is_static()) {
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_end.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_end size to be equal to input size - 2. Got: ",
-                              m_pads_end.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_begin.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_begin size to be equal to input size - 2. Got: ",
-                              m_pads_begin.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_kernel.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected kernel size to be equal to input size - 2. Got: ",
-                              m_kernel.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_strides.size()) == arg_shape.rank().get_max_length() - 2,
-                              "Expected strides size to be equal to input size - 2. Got: ",
-                              m_strides.size());
+    if (m_pads_end.empty()) {
+        m_pads_end.resize(m_kernel.size(), 0);
     }
 }
 
-ov::PartialShape ov::op::util::MaxPoolBase::infer_output_shape(const Strides& dilations) {
-    OV_OP_SCOPE(util_MaxPoolBase_infer_output_shape);
-
-    const auto& arg_shape = get_input_partial_shape(0);
-
-    bool update_auto_padding_succeed = true;
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER) {
-        const auto filter_dilations = dilations.empty() ? Strides(m_kernel.size(), 1) : dilations;
-        update_auto_padding_succeed = update_auto_padding(arg_shape, filter_dilations, m_pads_end, m_pads_begin);
-    }
-    if (m_auto_pad == PadType::VALID) {
-        m_pads_end = ov::Shape(m_pads_end.size(), 0);
-        m_pads_begin = ov::Shape(m_pads_begin.size(), 0);
-    }
-
-    auto output_shape = PartialShape::dynamic();
-    if (update_auto_padding_succeed) {
-        CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
-        CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
-        output_shape = ngraph::infer_batched_pooling_forward(this,
-                                                             get_input_partial_shape(0),
-                                                             pads_begin,
-                                                             pads_end,
-                                                             m_kernel,
-                                                             m_strides,
-                                                             true,
-                                                             m_rounding_type == op::RoundingType::CEIL,
-                                                             dilations);
-    } else {
-        if (arg_shape.rank().is_static() && arg_shape.rank().get_max_length() > 0) {
-            output_shape = std::vector<Dimension>(arg_shape.rank().get_max_length(), Dimension::dynamic());
-            if (arg_shape[0].is_static()) {
-                output_shape[0] = arg_shape[0];  // batch size
-            }
-            if (arg_shape[1].is_static()) {
-                output_shape[1] = arg_shape[1];  // channel size
-            }
-        }
-    }
-
-    return output_shape;
-}
-
-bool ov::op::util::MaxPoolBase::update_auto_padding(const PartialShape& in_shape,
-                                                    const Strides& filter_dilations,
-                                                    ov::Shape& new_pads_end,
-                                                    ov::Shape& new_pads_begin) const {
-    bool update_auto_padding_succeed = true;
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER) {
-        CoordinateDiff pads_end, pads_begin;
-        update_auto_padding_succeed = ngraph::try_apply_auto_padding(in_shape,
-                                                                     m_kernel,
-                                                                     m_strides,
-                                                                     filter_dilations,
-                                                                     m_auto_pad,
-                                                                     pads_end,
-                                                                     pads_begin);
-        new_pads_end = ov::Shape(pads_end.begin(), pads_end.end());
-        new_pads_begin = ov::Shape(pads_begin.begin(), pads_begin.end());
-    }
-    return update_auto_padding_succeed;
+void ov::op::util::MaxPoolBase::set_pads_end(Shape pads_end) {
+    m_pads_end = std::move(pads_end);
 }

View File

@@ -779,7 +779,7 @@ void auto_pad_resolving(ov::Node* node) {
     } else if (auto op = as_type<ngraph::op::util::MaxPoolBase>(node)) {
         if (pad_agnostic_types.count(op->get_auto_pad())) {
             op->set_pads_begin(Shape(op->get_pads_begin().size(), 0));
-            op->set_adding_above(Shape(op->get_pads_end().size(), 0));
+            op->set_pads_end(Shape(op->get_pads_end().size(), 0));
         }
     }
 }

View File

@@ -2,94 +2,143 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "common_test_utils/test_assertions.hpp"
 #include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/opsets/opset10.hpp"
 #include "util/type_prop.hpp"
 
 using namespace std;
-using namespace ngraph;
+using namespace ov;
+using namespace ov::opset10;
+using namespace testing;
 
-TEST(type_prop, adaptive_avg_pool) {
-    const PartialShape arg_shape{1, 6, 8, 9};
-    const vector<int64_t> output_shape{5, 7};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
-}
-
-TEST(type_prop, adaptive_avg_pool_dyn_batch) {
-    const PartialShape arg_shape{Dimension::dynamic(), 6, 8, 9};
-    const vector<int64_t> output_shape{5, 7};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 6, 5, 7}));
-}
-
-TEST(type_prop, adaptive_avg_pool_dyn_channels) {
-    const PartialShape arg_shape{1, Dimension::dynamic(), 8, 9};
-    const vector<int64_t> output_shape{5, 7};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, Dimension::dynamic(), 5, 7}));
-}
-
-TEST(type_prop, adaptive_avg_pool_dyn_spatial) {
-    const PartialShape arg_shape{1, 6, Dimension::dynamic(), Dimension::dynamic()};
-    const vector<int64_t> output_shape{5, 7};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
-}
-
-TEST(type_prop, adaptive_avg_pool_dyn_output_shape) {
-    const PartialShape arg_shape{1, 6, 8, 9};
-
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(
-        adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, Dimension::dynamic(), Dimension::dynamic()}));
-}
-
-TEST(type_prop, adaptive_avg_pool_dyn_rank) {
-    const PartialShape arg_shape = PartialShape::dynamic();
-
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
-    auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
-}
-
-TEST(type_prop, adaptive_avg_pool_unsupported_input_shape) {
-    const PartialShape arg_shape{1, 6};
-    const vector<int64_t> output_shape{1};
-
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{}, output_shape);
-
-    EXPECT_THROW(const auto unused = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape), NodeValidationFailure);
-}
-
-TEST(type_prop, adaptive_avg_pool_wrong_out_shape) {
-    const PartialShape arg_shape{1, 6, 8, 9};
-    const vector<int64_t> output_shape{5, 7, 8};
-
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, output_shape);
-
-    EXPECT_THROW(const auto unused = make_shared<op::v8::AdaptiveAvgPool>(data, out_shape), NodeValidationFailure);
-}
+class AdaptiveAvgPoolV8Test : public TypePropOpTest<op::v8::AdaptiveAvgPool> {};
+
+TEST_F(AdaptiveAvgPoolV8Test, default_ctor) {
+    const auto data = make_shared<Parameter>(element::f32, PartialShape{2, 6, 3, 2});
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+
+    const auto op = make_op();
+    op->set_arguments(OutputVector{data, out_shape});
+    op->validate_and_infer_types();
+
+    EXPECT_EQ(op->get_input_size(), 2);
+    EXPECT_EQ(op->get_output_size(), 1);
+    EXPECT_EQ(op->get_output_element_type(0), element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({2, 6, 5, 7}));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, static_dim_shape_prop) {
+    auto data_shape = PartialShape{1, 6, 8, 9};
+    set_shape_labels(data_shape, 10);
+
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_element_type(0), element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 6, 5, 7}));
+    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label, ov::no_label));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, dynamic_batch) {
+    PartialShape data_shape{Dimension::dynamic(), 6, 8, 9};
+    set_shape_labels(data_shape, 10);
+
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_element_type(0), element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({-1, 6, 5, 7}));
+    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label, ov::no_label));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, dynamic_channel) {
+    PartialShape data_shape{1, Dimension::dynamic(), {10, 20}, 9};
+    set_shape_labels(data_shape, 20);
+
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, -1, 5, 7}));
+    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(20, 21, ov::no_label, ov::no_label));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, dynamic_spatial) {
+    PartialShape data_shape{1, 6, -1, -1};
+    set_shape_labels(data_shape, 20);
+
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 6, 5, 7}));
+    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(20, 21, ov::no_label, ov::no_label));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, dynamic_output_shape) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9, 2});
+    auto out_shape = make_shared<Parameter>(element::i64, PartialShape::dynamic());
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 6, -1, -1, -1}));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, output_shape_as_parameter) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9, 2});
+    auto out_shape = make_shared<Parameter>(element::i64, PartialShape{3});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 6, -1, -1, -1}));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, data_dynamic_rank) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto out_shape = make_shared<Parameter>(element::i32, Shape{3});
+    const auto op = make_op(data, out_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, preserve_partial_values_and_labels_on_output_shape_input) {
+    auto data_shape = PartialShape{{1, 2}, {2, 4}, 5, {10, 20}, -1};
+    set_shape_labels(data_shape, 10);
+    auto out_shape = PartialShape{{2, 6}, 3, {12, 13}};
+    set_shape_labels(out_shape, 20);
+
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto spatial_dim_shape = make_shared<ShapeOf>(make_shared<Parameter>(element::i64, out_shape));
+    const auto op = make_op(data, spatial_dim_shape);
+
+    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 2}, {2, 4}, {2, 6}, 3, {12, 13}}));
+    EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 11, 20, 21, 22));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, out_spatial_shape_size_not_match_data_spatial_dimensions) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{2, 3, 5, 6});
+    auto out_shape = make_shared<Parameter>(element::i32, Shape{3});
+
+    OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
+                    NodeValidationFailure,
+                    HasSubstr("Output shape for spatial dimension not compatible with data shape."));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, unsupported_input_shape) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6});
+    auto out_shape = Constant::create<int64_t>(element::i64, Shape{}, {1});
+
+    OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
+                    NodeValidationFailure,
+                    HasSubstr("Expected a 3D, 4D or 5D tensor for the input. Got:"));
+}
+
+TEST_F(AdaptiveAvgPoolV8Test, wrong_out_shape) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9});
+    auto out_shape = Constant::create<int64_t>(element::i64, Shape{3}, {5, 7, 8});
+
+    OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
+                    NodeValidationFailure,
+                    HasSubstr("Output shape for spatial dimension not compatible with data shape."));
+}

View File

@ -2,114 +2,191 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "ngraph/ngraph.hpp" #include "openvino/opsets/opset10.hpp"
#include "util/type_prop.hpp" #include "util/type_prop.hpp"
using namespace std; using namespace std;
using namespace ngraph; using namespace ov;
using namespace ov::opset10;
using namespace testing;
TEST(type_prop, adaptive_max_pool) { class AdaptiveMaxPoolV8Test : public TypePropOpTest<op::v8::AdaptiveMaxPool> {};
const PartialShape arg_shape{1, 6, 8, 9};
const vector<int64_t> output_shape{5, 7};
auto data = make_shared<op::Parameter>(element::f32, arg_shape); TEST_F(AdaptiveMaxPoolV8Test, default_ctor) {
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape); const auto data = make_shared<Parameter>(element::f32, PartialShape{2, 6, 3, 2});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape); const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7})); const auto op = make_op();
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7})); op->set_arguments(OutputVector{data, out_shape});
op->set_index_element_type(element::i64);
op->validate_and_infer_types();
EXPECT_EQ(op->get_index_element_type(), element::i64);
EXPECT_EQ(op->get_input_size(), 2);
EXPECT_EQ(op->get_output_size(), 2);
EXPECT_THAT(op->outputs(),
ElementsAre(Property("Output type", &Output<Node>::get_element_type, element::f32),
Property("Indices type", &Output<Node>::get_element_type, element::i64)));
EXPECT_THAT(op->outputs(),
Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({2, 6, 5, 7}))));
} }
TEST(type_prop, adaptive_max_pool_i32_indices) { TEST_F(AdaptiveMaxPoolV8Test, shape_infer) {
const PartialShape arg_shape{1, 6, 8, 9}; const auto data = make_shared<Parameter>(element::f64, Shape{2, 6, 3, 2, 10});
const vector<int64_t> output_shape{5, 7}; const auto out_shape = Constant::create<int64_t>(element::i64, Shape{3}, {5, 7, 1});
auto data = make_shared<op::Parameter>(element::f32, arg_shape); const auto op = make_op(data, out_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape);
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape, element::i32);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7})); EXPECT_THAT(op->outputs(),
ASSERT_EQ(adaptive_pool->output(1).get_element_type(), element::i32); ElementsAre(Property("Output type", &Output<Node>::get_element_type, element::f64),
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7})); Property("Indices type", &Output<Node>::get_element_type, element::i64)));
EXPECT_THAT(op->outputs(), Each(Property("Shape", &Output<Node>::get_shape, Shape({2, 6, 5, 7, 1}))));
} }
TEST(type_prop, adaptive_max_pool_dyn_batch) { TEST_F(AdaptiveMaxPoolV8Test, i32_indices) {
const PartialShape arg_shape{Dimension::dynamic(), 6, 8, 9}; auto data_shape = PartialShape{2, 6, 2, 10};
const vector<int64_t> output_shape{5, 7}; set_shape_labels(data_shape, 10);
auto data = make_shared<op::Parameter>(element::f32, arg_shape); const auto data = make_shared<Parameter>(element::f64, data_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape); const auto out_shape = Constant::create<int32_t>(element::i32, Shape{2}, {7, 1});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 6, 5, 7})); const auto op = make_op(data, out_shape, element::i32);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({Dimension::dynamic(), 6, 5, 7}));
EXPECT_THAT(op->outputs(),
ElementsAre(Property("Output type", &Output<Node>::get_element_type, element::f64),
Property("Indices type", &Output<Node>::get_element_type, element::i32)));
EXPECT_THAT(op->outputs(),
Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({2, 6, 7, 1}))));
EXPECT_THAT(op->outputs(),
Each(Property(&Output<Node>::get_partial_shape,
ResultOf(get_shape_labels, ElementsAre(10, 11, ov::no_label, ov::no_label)))));
} }
TEST(type_prop, adaptive_max_pool_dyn_channels) { TEST_F(AdaptiveMaxPoolV8Test, dynamic_batch) {
const PartialShape arg_shape{1, Dimension::dynamic(), 8, 9}; PartialShape data_shape{Dimension::dynamic(), 6, 8, 9};
const vector<int64_t> output_shape{5, 7}; set_shape_labels(data_shape, 10);
auto data = make_shared<op::Parameter>(element::f32, arg_shape); const auto data = make_shared<Parameter>(element::f32, data_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape); const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {9, 9});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape); const auto op = make_op(data, out_shape);
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, Dimension::dynamic(), 5, 7})); EXPECT_THAT(op->outputs(),
ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, Dimension::dynamic(), 5, 7})); Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({-1, 6, 9, 9}))));
EXPECT_THAT(op->outputs(),
Each(Property(&Output<Node>::get_partial_shape,
ResultOf(get_shape_labels, ElementsAre(10, 11, ov::no_label, ov::no_label)))));
} }
TEST(type_prop, adaptive_max_pool_dyn_spatial) { TEST_F(AdaptiveMaxPoolV8Test, dynamic_channel) {
const PartialShape arg_shape{1, 6, Dimension::dynamic(), Dimension::dynamic()}; PartialShape data_shape{2, Dimension::dynamic(), {10, 20}, 9};
const vector<int64_t> output_shape{5, 7}; set_shape_labels(data_shape, 10);
auto data = make_shared<op::Parameter>(element::f32, arg_shape); const auto data = make_shared<Parameter>(element::f32, data_shape);
auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, output_shape); const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape); const auto op = make_op(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, 5, 7}));
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, 5, 7}));
+    EXPECT_THAT(op->outputs(),
+                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({2, -1, 5, 7}))));
+    EXPECT_THAT(op->outputs(),
+                Each(Property(&Output<Node>::get_partial_shape,
+                              ResultOf(get_shape_labels, ElementsAre(10, 11, ov::no_label, ov::no_label)))));
}

-TEST(type_prop, adaptive_max_pool_dyn_output_shape) {
-    const PartialShape arg_shape{1, 6, 8, 9};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
-    auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
-    ASSERT_TRUE(
-        adaptive_pool->get_output_partial_shape(0).same_scheme({1, 6, Dimension::dynamic(), Dimension::dynamic()}));
-    ASSERT_TRUE(
-        adaptive_pool->get_output_partial_shape(1).same_scheme({1, 6, Dimension::dynamic(), Dimension::dynamic()}));
-}
+TEST_F(AdaptiveMaxPoolV8Test, dynamic_spatial) {
+    PartialShape data_shape{2, 6, -1, -1};
+    set_shape_labels(data_shape, 10);
+    const auto data = make_shared<Parameter>(element::f32, data_shape);
+    const auto out_shape = Constant::create<int64_t>(element::i64, Shape{2}, {5, 7});
+    const auto op = make_op(data, out_shape);
+    EXPECT_THAT(op->outputs(),
+                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({2, 6, 5, 7}))));
+    EXPECT_THAT(op->outputs(),
+                Each(Property(&Output<Node>::get_partial_shape,
+                              ResultOf(get_shape_labels, ElementsAre(10, 11, ov::no_label, ov::no_label)))));
+}

-TEST(type_prop, adaptive_max_pool_dyn_rank) {
-    const PartialShape arg_shape = PartialShape::dynamic();
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = make_shared<op::Parameter>(element::i64, Shape{2});
-    auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape);
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
-    ASSERT_TRUE(adaptive_pool->get_output_partial_shape(1).same_scheme(PartialShape::dynamic()));
-}
+TEST_F(AdaptiveMaxPoolV8Test, dynamic_output_shape) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9, 2});
+    auto out_shape = make_shared<Parameter>(element::i64, PartialShape::dynamic());
+    const auto op = make_op(data, out_shape);
+    EXPECT_THAT(op->outputs(),
+                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({1, 6, -1, -1, -1}))));
+}

-TEST(type_prop, adaptive_max_pool_unsupported_input_shape) {
-    const PartialShape arg_shape{1, 6};
-    const vector<int64_t> output_shape{1};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{}, output_shape);
-    EXPECT_THROW(const auto unused = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape), NodeValidationFailure);
-}
+TEST_F(AdaptiveMaxPoolV8Test, output_shape_as_parameter) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9, 2});
+    auto out_shape = make_shared<Parameter>(element::i64, PartialShape{3});
+    const auto op = make_op(data, out_shape);
+    EXPECT_THAT(op->outputs(),
+                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape({1, 6, -1, -1, -1}))));
+}

-TEST(type_prop, adaptive_max_pool_wrong_out_shape) {
-    const PartialShape arg_shape{1, 6, 8, 9};
-    const vector<int64_t> output_shape{5, 7, 8};
-    auto data = make_shared<op::Parameter>(element::f32, arg_shape);
-    auto out_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, output_shape);
-    EXPECT_THROW(const auto unused = make_shared<op::v8::AdaptiveMaxPool>(data, out_shape), NodeValidationFailure);
-}
+TEST_F(AdaptiveMaxPoolV8Test, data_dynamic_rank) {
+    auto data = make_shared<Parameter>(element::f32, PartialShape::dynamic());
+    auto out_shape = make_shared<Parameter>(element::i32, Shape{3});
+    const auto op = make_op(data, out_shape);
+    EXPECT_THAT(op->outputs(),
+                Each(Property("PartialShape", &Output<Node>::get_partial_shape, PartialShape::dynamic())));
+}
TEST_F(AdaptiveMaxPoolV8Test, out_spatial_shape_size_not_match_data_spatial_dimensions) {
auto data = make_shared<Parameter>(element::f32, PartialShape{2, 3, 5, 6});
auto out_shape = make_shared<Parameter>(element::i32, Shape{3});
OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
NodeValidationFailure,
HasSubstr("Output shape for spatial dimension not compatible with data shape."));
}
TEST_F(AdaptiveMaxPoolV8Test, preserve_partial_values_and_labels_on_output_shape_input) {
auto data_shape = PartialShape{{1, 2}, {2, 4}, 5, {10, 20}, -1};
set_shape_labels(data_shape, 10);
auto out_shape = PartialShape{{2, 6}, -1, {12, 13}};
set_shape_labels(out_shape, 20);
const auto data = make_shared<Parameter>(element::f32, data_shape);
const auto spatial_dim_shape = make_shared<ShapeOf>(make_shared<Parameter>(element::i64, out_shape));
const auto op = make_op(data, spatial_dim_shape);
EXPECT_THAT(op->outputs(),
Each(Property("PartialShape",
&Output<Node>::get_partial_shape,
PartialShape({{1, 2}, {2, 4}, {2, 6}, -1, {12, 13}}))));
EXPECT_THAT(
op->outputs(),
Each(Property(&Output<Node>::get_partial_shape, ResultOf(get_shape_labels, ElementsAre(10, 11, 20, 21, 22)))));
}
TEST_F(AdaptiveMaxPoolV8Test, unsupported_input_shape) {
auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6});
auto out_shape = Constant::create<int64_t>(element::i64, Shape{}, {1});
OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
NodeValidationFailure,
HasSubstr("Expected a 3D, 4D or 5D tensor for the input. Got:"));
}
TEST_F(AdaptiveMaxPoolV8Test, wrong_out_shape) {
auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9});
auto out_shape = Constant::create<int64_t>(element::i64, Shape{3}, {5, 7, 8});
OV_EXPECT_THROW(const auto op = make_op(data, out_shape),
NodeValidationFailure,
HasSubstr("Output shape for spatial dimension not compatible with data shape."));
}
TEST_F(AdaptiveMaxPoolV8Test, wrong_index_element_type) {
auto data = make_shared<Parameter>(element::f32, PartialShape{1, 6, 8, 9});
auto out_shape = Constant::create<int64_t>(element::i16, Shape{2}, {5, 7});
OV_EXPECT_THROW(const auto op = make_op(data, out_shape, element::i16),
NodeValidationFailure,
HasSubstr("Index element type must be i32 or i64"));
}

View File

@@ -2,12 +2,39 @@
// SPDX-License-Identifier: Apache-2.0
//

+#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;
+using namespace testing;
TEST(type_prop, avg_pool_default_ctor) {
PartialShape arg_shape{1, 3, 32};
set_shape_labels(arg_shape, 10);
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>();
mp->set_argument(0, arg);
mp->set_pads_begin({2});
mp->set_pads_end({2});
mp->set_kernel({2});
mp->set_strides({1});
mp->set_rounding_type(op::RoundingType::CEIL);
mp->set_auto_pad(op::PadType::SAME_LOWER);
mp->validate_and_infer_types();
EXPECT_TRUE(mp->get_exclude_pad());
EXPECT_EQ(mp->get_input_size(), 1);
EXPECT_EQ(mp->get_output_size(), 1);
EXPECT_EQ(mp->get_output_element_type(0), element::f32);
EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32}));
EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label));
EXPECT_EQ(mp->get_pads_begin(), (Shape{1}));
EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
TEST(type_prop, avg_pool_auto_padding) {
    const PartialShape arg_shape{1, 3, 32};
@@ -29,20 +56,20 @@ TEST(type_prop, avg_pool_auto_padding) {
        rounding_mode,
        auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
-TEST(type_prop, avg_pool_auto_padding_3D_nc_dims_dynamic_same_lower) {
-    const PartialShape arg_shape{Dimension::dynamic(), 32, 32};
-    const Strides strides{1};
-    const Shape pads_begin{0};
-    const Shape pads_end{0};
-    const Shape kernel_shape{2};
+TEST(type_prop, avg_pool_explicit_padding_round_ceil_dynamic_dimensions) {
+    const PartialShape arg_shape{-1, -1, -1};
+    const Strides strides{4};
+    const Shape pads_begin{2};
+    const Shape pads_end{2};
+    const Shape kernel_shape{4};
    const bool exclude_pad = true;
-    const auto rounding_mode = op::RoundingType::FLOOR;
-    const auto auto_pad = op::PadType::SAME_LOWER;
+    const auto rounding_mode = op::RoundingType::CEIL;
+    const auto auto_pad = op::PadType::EXPLICIT;

    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::AvgPool>(arg,
@@ -54,9 +81,9 @@ TEST(type_prop, avg_pool_auto_padding_3D_nc_dims_dynamic_same_lower) {
        rounding_mode,
        auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({-1, -1, {1, -1}}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{2}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{2}));
}
TEST(type_prop, avg_pool_auto_padding_4D_nc_dims_dynamic_same_lower) {
@@ -79,9 +106,9 @@ TEST(type_prop, avg_pool_auto_padding_4D_nc_dims_dynamic_same_lower) {
        rounding_mode,
        auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper) {
@@ -104,9 +131,9 @@ TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper) {
        rounding_mode,
        auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{1, 1}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{1, 1}));
}
TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) {
@@ -129,9 +156,9 @@ TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) {
        rounding_mode,
        auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, Dimension::dynamic()}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 0}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, Dimension::dynamic()}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 0}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, avg_pool_1d_deduce) {
@@ -429,8 +456,8 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_ok) {
        false,
        op::RoundingType::FLOOR);
-    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6)));
+    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape::dynamic(6)));
}
TEST(type_prop, avg_pool_partial_rank_dynamic_attrib_rank_mismatch) {
@@ -468,8 +495,8 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) {
        false,
        op::RoundingType::FLOOR);
-    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(5)));
+    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape({-1, -1, {1, -1}, {1, -1}, {1, -1}}));
}
TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) {
@@ -488,9 +515,8 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) {
        false,
        op::RoundingType::FLOOR);
-    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(
-        ap->get_output_partial_shape(0).same_scheme(PartialShape{5, Dimension::dynamic(), 7, Dimension::dynamic(), 1}));
+    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape{5, -1, 7, {1, -1}, 1}));
}
TEST(type_prop, avg_pool_partial_rank_static_dynamic_attrib_rank_mismatch) {
@@ -547,9 +573,8 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big)
        true,
        op::RoundingType::FLOOR);
-    ASSERT_EQ(ap->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(
-        ap->get_output_partial_shape(0).same_scheme(PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1}));
+    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape{5, Dimension::dynamic(), 1, {1, -1}, 1}));
}
TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_in_padding) {
@@ -570,3 +595,43 @@
        op::RoundingType::FLOOR),
        NodeValidationFailure);
}
TEST(type_prop, avg_pool_kernel_dilation_not_compatible_with_padding_begin) {
const PartialShape arg_shape{5, -1, 8};
const Shape kernel{9};
const Strides window_movement_strides{1};
const Shape pads_begin{10};
const Shape pads_end{0};
const auto param = make_shared<op::Parameter>(element::f32, arg_shape);
OV_EXPECT_THROW(const auto unused = make_shared<op::v1::AvgPool>(param,
window_movement_strides,
pads_begin,
pads_end,
kernel,
true,
op::RoundingType::FLOOR),
NodeValidationFailure,
HasSubstr("Kernel after dilation is sometimes entirely in the padding area for axis 0"));
}
TEST(type_prop, avg_pool_kernel_dilation_not_compatible_with_padding_end) {
const PartialShape arg_shape{5, -1, 8};
const Shape kernel{9};
const Strides window_movement_strides{1};
const Shape pads_begin{0};
const Shape pads_end{10};
const auto param = make_shared<op::Parameter>(element::f32, arg_shape);
OV_EXPECT_THROW(const auto unused = make_shared<op::v1::AvgPool>(param,
window_movement_strides,
pads_begin,
pads_end,
kernel,
true,
op::RoundingType::FLOOR),
NodeValidationFailure,
HasSubstr("Kernel after dilation is sometimes entirely in the padding area for axis 0"));
}
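Note: both negative tests above exercise the same validation. The dilated kernel extent is (kernel_size - 1) * dilation + 1; AvgPool has no dilation attribute, so the shared pooling check uses dilation 1 and gets (9 - 1) * 1 + 1 = 9. A pad of 10 exceeds that extent, so some window placements would fall entirely inside the padding area, which is exactly what the quoted error message rejects.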

View File

@@ -8,9 +8,37 @@
using namespace std;
using namespace ngraph;
+using namespace testing;
TEST(type_prop, max_pool_default_ctor) {
PartialShape arg_shape{1, 3, 32};
set_shape_labels(arg_shape, 10);
const Strides strides{1};
const Shape pads_begin{2};
const Shape pads_end{2};
const Shape kernel_shape{2};
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>();
mp->set_argument(0, arg);
mp->set_pads_begin(pads_begin);
mp->set_pads_end(pads_end);
mp->set_kernel(kernel_shape);
mp->set_strides(strides);
mp->set_rounding_type(op::RoundingType::CEIL);
mp->set_auto_pad(op::PadType::VALID);
mp->validate_and_infer_types();
EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 31}));
EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label));
EXPECT_EQ(mp->get_pads_begin(), (Shape{0}));
EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
TEST(type_prop, max_pool_valid_auto_padding) {
-    const PartialShape arg_shape{1, 3, 32};
+    PartialShape arg_shape{1, 3, {10, 32}};
+    set_shape_labels(arg_shape, 10);
    const Strides strides{1};
    const Shape pads_begin{2};
    const Shape pads_end{2};
@@ -20,9 +48,10 @@ TEST(type_prop, max_pool_valid_auto_padding) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 31}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{0}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, {9, 31}}));
+    EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{0}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
TEST(type_prop, max_pool_1D_auto_padding) {
@@ -37,9 +66,9 @@ TEST(type_prop, max_pool_1D_auto_padding) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
TEST(type_prop, max_pool_2D_auto_padding) {
@@ -54,9 +83,9 @@ TEST(type_prop, max_pool_2D_auto_padding) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, max_pool_auto_padding_1D_nc_dims_dynamic_same_lower) {
@@ -71,9 +100,9 @@ TEST(type_prop, max_pool_auto_padding_1D_nc_dims_dynamic_same_lower) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 32, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0}));
}
TEST(type_prop, max_pool_auto_padding_2D_nc_dims_dynamic_same_lower) {
@@ -88,13 +117,14 @@ TEST(type_prop, max_pool_auto_padding_2D_nc_dims_dynamic_same_lower) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper) {
-    const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
+    PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
+    set_shape_labels(arg_shape, 10);
    const Strides strides{1, 1};
    const Shape pads_begin{0, 0};
    const Shape pads_end{0, 0};
@@ -105,9 +135,29 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{1, 1}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
+    EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label, ov::no_label));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{1, 1}));
}
TEST(type_prop, max_pool_auto_padding_interval_dims_same_upper) {
PartialShape arg_shape{{1, 2}, {2, 3}, {16, 32}, {11, 32}};
set_shape_labels(arg_shape, 10);
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_UPPER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({{1, 2}, {2, 3}, -1, -1}));
EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label, ov::no_label));
EXPECT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic) {
@@ -122,9 +172,9 @@ TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, Dimension::dynamic()}));
-    ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 0}));
-    ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
+    EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, Dimension::dynamic()}));
+    EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 0}));
+    EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, max_pool_default_values) {
@@ -137,8 +187,8 @@ TEST(type_prop, max_pool_default_values) {
    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
    auto mp = make_shared<op::v1::MaxPool>(arg, strides, pads_begin, pads_end, kernel_shape);
-    ASSERT_EQ(mp->get_rounding_type(), op::RoundingType::FLOOR);
-    ASSERT_EQ(mp->get_auto_pad(), op::PadType::EXPLICIT);
+    EXPECT_EQ(mp->get_rounding_type(), op::RoundingType::FLOOR);
+    EXPECT_EQ(mp->get_auto_pad(), op::PadType::EXPLICIT);
}
TEST(type_prop, max_pool_v8_3D_no_dilations) {
@@ -153,8 +203,8 @@ TEST(type_prop, max_pool_v8_3D_no_dilations) {
    const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
    const auto expected_output_shape = PartialShape({1, 7, 11});
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
-    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+    EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
+    EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
}
TEST(type_prop, max_pool_v8_3D_with_dilations) {
@@ -169,8 +219,8 @@ TEST(type_prop, max_pool_v8_3D_with_dilations) {
    const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
    const auto expected_output_shape = PartialShape({1, 7, 9});
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
-    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+    EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
+    EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
}
TEST(type_prop, max_pool_v8_3D_with_dilations_and_padding) {
@@ -185,8 +235,8 @@ TEST(type_prop, max_pool_v8_3D_with_dilations_and_padding) {
    const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
    const auto expected_output_shape = PartialShape({1, 7, 12});
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
-    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+    EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
+    EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
}
TEST(type_prop, max_pool_v8_4D_no_dilations) {
@@ -201,8 +251,8 @@ TEST(type_prop, max_pool_v8_4D_no_dilations) {
    const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
    const auto expected_output_shape = PartialShape({1, 3, 12, 12});
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
-    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+    EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
+    EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
}
TEST(type_prop, max_pool_v8_4D_with_dilations) {
@@ -217,6 +267,51 @@ TEST(type_prop, max_pool_v8_4D_with_dilations) {
    const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
    const auto expected_output_shape = PartialShape({1, 3, 11, 10});
-    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
-    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+    EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
+    EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
}
TEST(type_prop, max_pool_v8_4D_interval_dims_with_dilations) {
PartialShape arg_shape{{2, 3}, {1, 3}, {2, 13}, {6, 13}};
set_shape_labels(arg_shape, 10);
const Strides strides{1, 1};
const Strides dilations{2, 3};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
const auto mp = make_shared<op::v8::MaxPool>(arg, strides, dilations, pads_begin, pads_end, kernel_shape);
const auto expected_output_shape = PartialShape({{2, 3}, {1, 3}, {1, 11}, {3, 10}});
EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label, ov::no_label));
}
TEST(type_prop, max_pool_v8_4D_with_dilations_and_auto_pad_same_upper) {
const PartialShape arg_shape{1, 3, 13, 13};
const Strides strides{1, 1};
const Strides dilations{2, 3};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{3, 3};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_UPPER;
const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
const auto mp = make_shared<op::v8::MaxPool>(arg,
strides,
dilations,
pads_begin,
pads_end,
kernel_shape,
rounding_mode,
auto_pad);
const auto expected_output_shape = PartialShape({1, 3, 13, 13});
EXPECT_EQ(mp->get_output_partial_shape(0), expected_output_shape);
EXPECT_EQ(mp->get_output_partial_shape(1), expected_output_shape);
EXPECT_EQ(mp->get_pads_begin(), (Shape{2, 3}));
EXPECT_EQ(mp->get_pads_end(), (Shape{2, 3}));
}
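Note: the expected pads in the SAME_UPPER test follow from the dilated kernel extent, (kernel_size - 1) * dilation + 1. Axis 0: (3 - 1) * 2 + 1 = 5, so stride-1 SAME padding adds 5 - 1 = 4 rows, split 2 + 2; axis 1: (3 - 1) * 3 + 1 = 7, adding 6 columns, split 3 + 3. Hence pads_begin {2, 3}, pads_end {2, 3}, and spatial dimensions preserved at 13.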

View File

@@ -8,9 +8,12 @@
#include <openvino/opsets/opset5.hpp>
#include <openvino/opsets/opset7.hpp>

+#include "adaptive_avg_pool_shape_inference.hpp"
+#include "adaptive_max_pool_shape_inference.hpp"
#include "assign_shape_inference.hpp"
#include "augru_cell_shape_inference.hpp"
#include "augru_sequence_shape_inference.hpp"
+#include "avg_pool_shape_inference.hpp"
#include "batch_to_space_shape_inference.hpp"
#include "broadcast_shape_inference.hpp"
#include "bucketize_shape_inference.hpp"
@@ -45,6 +48,7 @@
#include "irdft_shape_inference.hpp"
#include "lstm_cell_shape_inference.hpp"
#include "matmul_shape_inference.hpp"
+#include "max_pool_shape_inference.hpp"
#include "one_hot_shape_inference.hpp"
#include "pad_shape_inference.hpp"
#include "proposal_shape_inference.hpp"
@@ -255,11 +259,7 @@ static inline ov::CoordinateDiff convertPadding(const ov::CoordinateDiff& newPads
}

static inline ov::CoordinateDiff convertPadding(const ov::Shape& newPads) {
-    std::vector<ptrdiff_t> pads(newPads.size());
-    for (int i = 0; i < newPads.size(); i++) {
-        pads[i] = static_cast<ptrdiff_t>(newPads[i]);
-    }
-    return pads;
+    return {newPads.begin(), newPads.end()};
}
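The one-line body above relies on std::vector's iterator-range constructor: each size_t element of the ov::Shape is value-converted to the ptrdiff_t element type of ov::CoordinateDiff, which is exactly what the removed loop did with an explicit static_cast. A minimal standalone sketch of the same conversion (hypothetical snippet, not part of the change):

#include "openvino/core/coordinate_diff.hpp"
#include "openvino/core/shape.hpp"

void convert_example() {
    ov::Shape pads{2, 0, 1};
    // The range constructor converts element-wise, matching the old per-element loop.
    ov::CoordinateDiff converted(pads.begin(), pads.end());
}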
template <typename OP>
@@ -361,13 +361,48 @@ protected:
    ov::CoordinateDiff pads_begin, pads_end;
};
template <class TOp>
class ShapeInferBaseWithPadding : public entryBase {
public:
ShapeInferBaseWithPadding(std::shared_ptr<Node> node) : entryBase{std::move(node)}, m_pads_begin{}, m_pads_end{} {}
IShapeInferCommon::Result infer(const std::vector<StaticShape>& input_shapes,
const std::map<size_t, ov::HostTensorPtr>& constant_data) override {
auto out_shapes = shape_infer(static_cast<TOp*>(node.get()), input_shapes);
on_infer_exit();
return {std::move(out_shapes), ShapeInferStatus::success};
}
const ov::CoordinateDiff& get_pads_begin() override {
return m_pads_begin;
}
const ov::CoordinateDiff& get_pads_end() override {
return m_pads_end;
}
protected:
void on_infer_exit() {
auto op = static_cast<TOp*>(node.get());
m_pads_begin = convertPadding(op->get_pads_begin());
m_pads_end = convertPadding(op->get_pads_end());
}
ov::CoordinateDiff m_pads_begin, m_pads_end;
};
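For context on how this class is consumed: the registry updates further down map AvgPool and MaxPool to it, and a caller would follow roughly this pattern (a sketch only; max_pool_node and input_shapes are assumed placeholders, not names from this file):

// Hypothetical wiring; the registry macros below do the equivalent in practice.
auto pool_infer = std::make_shared<ShapeInferBaseWithPadding<ov::op::v1::MaxPool>>(max_pool_node);
auto out_shapes = pool_infer->infer(input_shapes, {}).shapes;  // runs the template shape_infer for MaxPool
const auto& pads_begin = pool_infer->get_pads_begin();         // pads snapshotted by on_infer_exit()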
/**
* @brief Base shape inference object implementing the IStaticShapeInfer without padding support
*
* @tparam TOp Type of operator.
*/
template <class TOp>
class ShapeInferBase : public IStaticShapeInfer {
public:
    using iface_type = IStaticShapeInfer;
    virtual ~ShapeInferBase() = default;

-    ShapeInferBase(std::shared_ptr<Node> node) : m_node{node} {
+    ShapeInferBase(std::shared_ptr<Node> node) : m_input_ranks{}, m_node{node} {
        static_assert(std::is_same<int64_t, Dimension::value_type>::value, "Rank type not match to input_ranks type.");
        for (size_t i = 0; i < node->get_input_size(); ++i) {
            const auto& shape = node->get_input_partial_shape(i);
@@ -508,8 +543,10 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
    _OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(Selu, entryFirstPassthrough),
    _OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(Softmax, entryCopy),
    _OV_OP_NON_TEMPLATE_SHAPE_INFER_REG(Swish, entryFirstPassthrough),
+    _OV_OP_SHAPE_INFER_REG(AdaptiveAvgPool, entryIOC),
+    _OV_OP_SHAPE_INFER_REG(AdaptiveMaxPool, entryIOC),
    _OV_OP_SHAPE_INFER_REG(Assign, entryIO),
-    _OV_OP_SHAPE_INFER_REG(AvgPool, entryFallbackWithPadding),
+    _OV_OP_SHAPE_INFER_REG(AvgPool, ShapeInferBaseWithPadding),
    _OV_OP_SHAPE_INFER_REG(BatchToSpace, entryIOC),
    _OV_OP_SHAPE_INFER_REG(Broadcast, entryIOC),
    _OV_OP_SHAPE_INFER_REG(Bucketize, entryIO),
@@ -544,7 +581,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
    _OV_OP_SHAPE_INFER_REG(IRDFT, entryIOC),
    _OV_OP_SHAPE_INFER_REG(LSTMCell, entryIO),
    _OV_OP_SHAPE_INFER_REG(MatMul, entryIO),
-    _OV_OP_SHAPE_INFER_REG(MaxPool, entryFallbackWithPadding),
+    _OV_OP_SHAPE_INFER_REG(MaxPool, ShapeInferBaseWithPadding),
    _OV_OP_SHAPE_INFER_REG(OneHot, entryIOC),
    _OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUCell, entryIO),
    _OV_OP_SHAPE_INFER_REG(ov::op::internal::AUGRUSequence, entryIO),
@@ -607,7 +644,7 @@ const IShapeInferCommonFactory::TRegistry IShapeInferCommonFactory::registry{
    _OV_OP_SHAPE_INFER_REG(opset1::DetectionOutput, entryIO),
    _OV_OP_SHAPE_INFER_REG(opset1::Interpolate, entryIOC),
    _OV_OP_SHAPE_INFER_REG(opset1::LSTMCell, entryIO),
-    _OV_OP_SHAPE_INFER_REG(opset1::MaxPool, entryFallbackWithPadding),
+    _OV_OP_SHAPE_INFER_REG(opset1::MaxPool, ShapeInferBaseWithPadding),
    _OV_OP_SHAPE_INFER_REG(opset1::Proposal, entryIO),
    _OV_OP_SHAPE_INFER_REG(opset1::Range, entryIOC),
    _OV_OP_SHAPE_INFER_REG(opset1::ShapeOf, entryIO),

View File

@@ -42,8 +42,12 @@ public:
    bool operator==(const StaticDimension& dimension) const;
    bool operator!=(const StaticDimension& dimension) const;

-    static bool is_static() { return true; }
-    static bool is_dynamic() { return false; }
+    static constexpr bool is_static() {
+        return true;
+    }
+    static constexpr bool is_dynamic() {
+        return false;
+    }

    value_type get_length() const;
    value_type get_min_length() const;
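Making both predicates constexpr also allows compile-time use in static-shape code paths, e.g. (hypothetical usage; the ov::intel_cpu namespace is assumed from the CPU-plugin test files):

static_assert(ov::intel_cpu::StaticDimension::is_static(), "StaticDimension is static by definition");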

View File

@@ -0,0 +1,79 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
class AdaptiveAvgPoolV8StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v8::AdaptiveAvgPool> {
protected:
void SetUp() override {
output_shapes.resize(1);
}
};
TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, default_ctor) {
int32_t spatial_dims[] = {10, 20};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
op = make_op();
input_shapes = ShapeVector{{1, 3, 1, 2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 10, 20}));
}
TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1, -2});
const auto out_shape = op::v0::Constant::create<int64_t>(element::i64, ov::Shape{1}, {17});
op = make_op(data, out_shape);
input_shapes = ShapeVector{{1, 3, 10}, {1}};
shape_inference(op.get(), input_shapes, output_shapes);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 17}));
}
TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const auto out_shape = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8, 7};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{3}, spatial_dims)}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 9, 8, 7}));
}
TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_has_wrong_length) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const auto out_shape = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
ov::NodeValidationFailure,
HasSubstr("Number of spatial dimensions is not compatible with input data rank"));
}

View File

@@ -0,0 +1,79 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
class AdaptiveMaxPoolV8StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v8::AdaptiveMaxPool> {
protected:
void SetUp() override {
output_shapes.resize(2);
}
};
TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, default_ctor) {
int32_t spatial_dims[] = {10, 20};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
op = make_op();
input_shapes = ShapeVector{{1, 3, 1, 2}, {2}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 10, 20})));
}
TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1, -2});
const auto out_shape = op::v0::Constant::create<int64_t>(element::i64, ov::Shape{1}, {17});
op = make_op(data, out_shape);
input_shapes = ShapeVector{{1, 3, 10}, {1}};
shape_inference(op.get(), input_shapes, output_shapes);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 17})));
}
TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const auto out_shape = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8, 7};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{3}, spatial_dims)}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
shape_inference(op.get(), input_shapes, output_shapes, const_data);
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 9, 8, 7})));
}
TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_has_wrong_length) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const auto out_shape = std::make_shared<op::v0::Parameter>(element::i32, PartialShape::dynamic());
op = make_op(data, out_shape);
int32_t spatial_dims[] = {9, 8};
const std::map<size_t, HostTensorPtr> const_data{
{1, std::make_shared<HostTensor>(element::i32, ov::Shape{2}, spatial_dims)}};
input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}};
OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, output_shapes, const_data),
ov::NodeValidationFailure,
HasSubstr("Number of spatial dimensions is not compatible with input data rank"));
}

View File

@@ -0,0 +1,127 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
class AvgPoolV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::AvgPool> {
protected:
void SetUp() override {
output_shapes.resize(1);
}
};
TEST_F(AvgPoolV1StaticShapeInferenceTest, default_ctor) {
op = make_op();
op->set_strides({1, 1});
op->set_pads_begin({2, 2});
op->set_pads_end({2, 1});
op->set_kernel({3, 2});
op->set_rounding_type(op::RoundingType::FLOOR);
op->set_auto_pad(op::PadType::VALID);
input_shapes = ShapeVector{{1, 3, 10, 12}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 8, 11}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(AvgPoolV1StaticShapeInferenceTest, no_auto_pad_round_floor) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1, -1, -1});
const Strides strides{1, 1};
const Shape pads_begin{2, 2};
const Shape pads_end{2, 1};
const Shape kernel_shape{3, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto pad_type = op::PadType::EXPLICIT;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type);
input_shapes = ShapeVector{{1, 3, 10, 12}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 12, 14}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({2, 2}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({2, 1}));
}
TEST_F(AvgPoolV1StaticShapeInferenceTest, auto_padding_same_lower_round_ceil) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const Strides strides{1, 3, 2};
const Shape pads_begin{2, 2, 1};
const Shape pads_end{2, 1, 10};
const Shape kernel_shape{5, 5, 5};
const auto rounding_mode = op::RoundingType::CEIL;
const auto pad_type = op::PadType::SAME_LOWER;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type);
input_shapes = ShapeVector{{1, 3, 10, 12, 20}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 10, 4, 10}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({2, 1, 2}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({2, 1, 1}));
}
TEST_F(AvgPoolV1StaticShapeInferenceTest, auto_padding_same_upper_round_floor_exclude_pad) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const Strides strides{1, 3, 2};
const Shape pads_begin{2, 2, 1};
const Shape pads_end{2, 1, 10};
const Shape kernel_shape{5, 5, 5};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto pad_type = op::PadType::SAME_UPPER;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, true, rounding_mode, pad_type);
input_shapes = ShapeVector{{1, 3, 10, 12, 20}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 10, 4, 10}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({2, 1, 1}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({2, 1, 2}));
}
TEST_F(AvgPoolV1StaticShapeInferenceTest, 5d_auto_padding_same_upper_round_floor) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const Strides strides{1, 1, 1};
const Shape pads_begin{0, 0, 0};
const Shape pads_end{0, 0, 0};
const Shape kernel_shape{2, 2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto pad_type = op::PadType::SAME_UPPER;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, true, rounding_mode, pad_type);
input_shapes = ShapeVector{{32, 32, 2, 2, 4}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({32, 32, 2, 2, 4}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0, 0}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({1, 1, 1}));
}

View File

@@ -0,0 +1,152 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/opsets/opset10.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
class MaxPoolV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v1::MaxPool> {
protected:
void SetUp() override {
output_shapes.resize(1);
}
};
TEST_F(MaxPoolV1StaticShapeInferenceTest, default_ctor) {
op = make_op();
op->set_strides({1, 1});
op->set_pads_begin({2, 2});
op->set_pads_end({2, 1});
op->set_kernel({3, 2});
op->set_rounding_type(op::RoundingType::FLOOR);
op->set_auto_pad(op::PadType::VALID);
input_shapes = ShapeVector{{1, 3, 10, 12}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 8, 11}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(MaxPoolV1StaticShapeInferenceTest, no_auto_pad_round_floor) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1, -1, -1});
const Strides strides{1, 1};
const Shape pads_begin{2, 2};
const Shape pads_end{2, 1};
const Shape kernel_shape{3, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto pad_type = op::PadType::EXPLICIT;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, rounding_mode, pad_type);
input_shapes = ShapeVector{{1, 3, 10, 12}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 12, 14}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({2, 2}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({2, 1}));
}
TEST_F(MaxPoolV1StaticShapeInferenceTest, auto_padding_same_lower_round_ceil) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const Strides strides{1, 3, 2};
const Shape pads_begin{2, 2, 1};
const Shape pads_end{2, 1, 10};
const Shape kernel_shape{5, 5, 5};
const auto rounding_mode = op::RoundingType::CEIL;
const auto pad_type = op::PadType::SAME_LOWER;
op = make_op(data, strides, pads_begin, pads_end, kernel_shape, rounding_mode, pad_type);
input_shapes = ShapeVector{{1, 3, 10, 12, 20}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 1);
EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 10, 4, 10}));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({2, 1, 2}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({2, 1, 1}));
}
class MaxPoolV8StaticShapeInferenceTest : public OpStaticShapeInferenceTest<op::v8::MaxPool> {
protected:
void SetUp() override {
output_shapes.resize(2);
}
};
TEST_F(MaxPoolV8StaticShapeInferenceTest, default_ctor) {
op = make_op();
op->set_strides({1, 1});
op->set_pads_begin({2, 2});
op->set_pads_end({2, 1});
op->set_kernel({3, 2});
op->set_dilations({2, 1});
op->set_rounding_type(op::RoundingType::FLOOR);
op->set_auto_pad(op::PadType::VALID);
input_shapes = ShapeVector{{1, 3, 10, 12}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({1, 3, 6, 11})));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(MaxPoolV8StaticShapeInferenceTest, no_dilation) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1, -1, -1});
const Strides strides{1, 1};
const Strides dilations{1, 1};
const Shape pads_begin{1, 1};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
op = make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape);
input_shapes = ShapeVector{{2, 3, 13, 13}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({2, 3, 13, 13})));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({1, 1}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({0, 0}));
}
TEST_F(MaxPoolV8StaticShapeInferenceTest, with_dilations) {
const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape::dynamic());
const Strides strides{1, 1};
const Strides dilations{2, 3};
const Shape pads_begin{0, 0};
const Shape pads_end{1, 1};
const Shape kernel_shape{2, 2};
op = make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape);
input_shapes = ShapeVector{{2, 4, 13, 13}};
auto shape_infer = make_shape_inference(op);
output_shapes = shape_infer->infer(input_shapes, {}).shapes;
EXPECT_EQ(output_shapes.size(), 2);
EXPECT_THAT(output_shapes, Each(StaticShape({2, 4, 12, 11})));
EXPECT_EQ(shape_infer->get_pads_begin(), CoordinateDiff({0, 0}));
EXPECT_EQ(shape_infer->get_pads_end(), CoordinateDiff({1, 1}));
}