Maxpool revise (#3397)

* create MaxPoolLayer test

* Create single layer tests for MaxPool for cpu plugin

* create max_pool_2d_ceil unit test

* Update MaxPool spec

* add comments describing AUTO and NOTSET types

* create unit test for MaxPool

* add type_prop test for default values

* add MaxPool unit tests to CMakeList

* Remove second constructor and change the first one so it has default values for rounding_type and auto_pad

* style-apply

* Update the spec

* add max pool single layer test instances for different pad type

* add 5D input max pool single layer test instance for cpu plugin

* Remove max pool single layer tests files

* add more test instances for max pool single layer tests for cpu plugin

* add newline characters
This commit is contained in:
Piotr Szmelczynski 2020-12-03 04:44:20 +01:00 committed by GitHub
parent 071fb9d1c6
commit 8344c29090
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 343 additions and 37 deletions

View File

@ -52,15 +52,16 @@
* *floor*
* **Type**: string
* **Default value**: *floor*
* **Required**: *no*
* *auto_pad*
* **Description**: *auto_pad* specifies how the padding is calculated. Possible values:
* None (not specified): use explicit padding values.
* *explicit*: use explicit padding values.
* *same_upper (same_lower)* - the input is padded to match the output size. If an odd total padding is required, the extra padding is added at the end (at the beginning).
* *valid* - do not use padding.
* **Type**: string
* **Default value**: None
* **Default value**: *explicit*
* **Required**: *no*
* **Note**: *pads_begin* and *pads_end* attributes are ignored when *auto_pad* is specified.
@ -70,9 +71,9 @@
**Mathematical Formulation**
\f[
output_{j} = MAX\{ x_{0}, ... x_{i}\}
\f]
\f[
output_{j} = max(x_{0}, ..., x_{i})
\f]
**Example**

View File

@ -25,12 +25,18 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
// Kernel sizes for 4D (NCHW) max-pooling test cases.
const std::vector<std::vector<size_t >> kernels = {{3, 3},
{3, 5}};
// Kernel size for 5D (NCDHW) max-pooling test cases.
const std::vector<std::vector<size_t >> kernel3D = {{2, 2, 2}};
// Stride combinations for 4D inputs.
const std::vector<std::vector<size_t >> strides = {{1, 1},
{1, 2}};
// Stride combinations for 5D inputs.
const std::vector<std::vector<size_t >> strides3D = {{1, 1, 1},
{2, 2, 2}};
// Explicit begin-padding values for 4D inputs (ignored when auto_pad is SAME_*).
const std::vector<std::vector<size_t >> padBegins = {{0, 0},
{0, 2}};
const std::vector<std::vector<size_t >> padBegins3D = {{0, 0, 0}};
// Explicit end-padding values for 4D inputs (ignored when auto_pad is SAME_*).
const std::vector<std::vector<size_t >> padEnds = {{0, 0},
{0, 2}};
const std::vector<std::vector<size_t >> padEnds3D = {{0, 0, 0}};
// Output-shape rounding modes exercised by the explicit-pad test suites.
const std::vector<ngraph::op::RoundingType> roundingTypes = {ngraph::op::RoundingType::CEIL,
ngraph::op::RoundingType::FLOOR};
////* ========== Max Pooling ========== */
@ -46,7 +52,7 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRpunding, PoolingLayerTest,
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest,
::testing::Combine(
maxPool_ExplicitPad_FloorRounding_Params,
::testing::ValuesIn(netPrecisions),
@ -58,6 +64,126 @@ INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRpunding, PoolingLayerTes
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PoolingLayerTest::getTestCaseName);
/* ========== Same Upper Pad Floor Rounding ========== */
// MaxPool parameter set: SAME_UPPER auto padding with FLOOR rounding,
// combined over every 2D kernel/stride/pad declared above.
// Note: explicit pads_begin/pads_end values are ignored when auto_pad is SAME_UPPER.
const auto maxPool_SameUpperPad_FloorRounding_Params = ::testing::Combine(
::testing::Values(ngraph::helpers::PoolingTypes::MAX),
::testing::ValuesIn(kernels),
::testing::ValuesIn(strides),
::testing::ValuesIn(padBegins),
::testing::ValuesIn(padEnds),
::testing::Values(ngraph::op::RoundingType::FLOOR),
::testing::Values(ngraph::op::PadType::SAME_UPPER),
::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameUpperPad_FloorRounding, PoolingLayerTest,
::testing::Combine(
maxPool_SameUpperPad_FloorRounding_Params,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PoolingLayerTest::getTestCaseName);
/* ========== Same Lower Pad Floor Rounding ========== */
// MaxPool parameter set: SAME_LOWER auto padding with FLOOR rounding.
// Note: explicit pads_begin/pads_end values are ignored when auto_pad is SAME_LOWER.
const auto maxPool_SameLowerPad_FloorRounding_Params = ::testing::Combine(
        ::testing::Values(ngraph::helpers::PoolingTypes::MAX),
        ::testing::ValuesIn(kernels),
        ::testing::ValuesIn(strides),
        ::testing::ValuesIn(padBegins),
        ::testing::ValuesIn(padEnds),
        ::testing::Values(ngraph::op::RoundingType::FLOOR),
        ::testing::Values(ngraph::op::PadType::SAME_LOWER),
        ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);

// Fix: this instantiation previously reused maxPool_SameUpperPad_FloorRounding_Params,
// so the SAME_LOWER parameter set defined above was never actually exercised.
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameLowerPad_FloorRounding, PoolingLayerTest,
                        ::testing::Combine(
                                maxPool_SameLowerPad_FloorRounding_Params,
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                ::testing::Values(InferenceEngine::Layout::ANY),
                                ::testing::Values(InferenceEngine::Layout::ANY),
                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                        PoolingLayerTest::getTestCaseName);
/* ========== Explicit Pad Floor Rounding 5D input ========== */
// MaxPool parameter set for 5D (NCDHW) inputs: explicit padding, FLOOR rounding.
const auto maxPool_ExplicitPad_FloorRounding_5Dinput_Params = ::testing::Combine(
::testing::Values(ngraph::helpers::PoolingTypes::MAX),
::testing::ValuesIn(kernel3D),
::testing::ValuesIn(strides3D),
::testing::ValuesIn(padBegins3D),
::testing::ValuesIn(padEnds3D),
::testing::Values(ngraph::op::RoundingType::FLOOR),
::testing::Values(ngraph::op::PadType::EXPLICIT),
::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_FloorRounding_5Dinput, PoolingLayerTest,
::testing::Combine(
maxPool_ExplicitPad_FloorRounding_5Dinput_Params,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({32, 32, 2, 2, 2})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PoolingLayerTest::getTestCaseName);
/* ========== Same Upper Pad Floor Rounding 5D input ========== */
// MaxPool parameter set for 5D (NCDHW) inputs: SAME_UPPER auto padding, FLOOR rounding.
// Note: explicit pads_begin/pads_end values are ignored when auto_pad is SAME_UPPER.
const auto maxPool_SameUpperPad_FloorRounding_5Dinput_Params = ::testing::Combine(
::testing::Values(ngraph::helpers::PoolingTypes::MAX),
::testing::ValuesIn(kernel3D),
::testing::ValuesIn(strides3D),
::testing::ValuesIn(padBegins3D),
::testing::ValuesIn(padEnds3D),
::testing::Values(ngraph::op::RoundingType::FLOOR),
::testing::Values(ngraph::op::PadType::SAME_UPPER),
::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameUpperPad_FloorRounding_5Dinput, PoolingLayerTest,
::testing::Combine(
maxPool_SameUpperPad_FloorRounding_5Dinput_Params,
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(std::vector<size_t >({32, 32, 2, 2, 2})),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
PoolingLayerTest::getTestCaseName);
/* ========== Same Lower Pad Ceil Rounding 5D input ========== */
// MaxPool parameter set for 5D (NCDHW) inputs: SAME_LOWER auto padding, CEIL rounding.
// Note: explicit pads_begin/pads_end values are ignored when auto_pad is SAME_LOWER.
const auto maxPool_SameLowerPad_CeilRounding_5Dinput_Params = ::testing::Combine(
        ::testing::Values(ngraph::helpers::PoolingTypes::MAX),
        ::testing::ValuesIn(kernel3D),
        ::testing::ValuesIn(strides3D),
        ::testing::ValuesIn(padBegins3D),
        ::testing::ValuesIn(padEnds3D),
        ::testing::Values(ngraph::op::RoundingType::CEIL),
        ::testing::Values(ngraph::op::PadType::SAME_LOWER),
        ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);

// Fix: this instantiation previously reused maxPool_SameUpperPad_FloorRounding_5Dinput_Params,
// so the SAME_LOWER/CEIL parameter set defined above was never actually exercised.
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_SameLowerPad_CeilRounding_5Dinput, PoolingLayerTest,
                        ::testing::Combine(
                                maxPool_SameLowerPad_CeilRounding_5Dinput_Params,
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                                ::testing::Values(InferenceEngine::Layout::ANY),
                                ::testing::Values(InferenceEngine::Layout::ANY),
                                ::testing::Values(std::vector<size_t>({32, 32, 2, 2, 2})),
                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                        PoolingLayerTest::getTestCaseName);
/* ========== Explicit Pad Ceil Rounding ========== */
const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
::testing::Values(ngraph::helpers::PoolingTypes::MAX),
@ -70,7 +196,7 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling
);
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_CeilRpunding, PoolingLayerTest,
INSTANTIATE_TEST_CASE_P(smoke_MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest,
::testing::Combine(
maxPool_ExplicitPad_CeilRounding_Params,
::testing::ValuesIn(netPrecisions),

View File

@ -49,24 +49,8 @@ namespace ngraph
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
op::RoundingType rounding_mode,
const PadType& auto_pad);
/// \brief Constructs a batched max pooling operation.
///
/// \param arg The node producing the input data batch tensor.
/// \param strides The strides.
/// \param pads_begin The beginning of padding shape.
/// \param pads_end The end of padding shape.
/// \param kernel The kernel shape.
/// \param rounding_mode Whether to use ceiling or floor rounding type while
/// computing output shape.
MaxPool(const Output<Node>& arg,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
op::RoundingType rounding_mode);
op::RoundingType rounding_mode = op::RoundingType::FLOOR,
const PadType& auto_pad = op::PadType::EXPLICIT);
bool visit_attributes(AttributeVisitor& visitor) override;
size_t get_version() const override { return 1; }
@ -108,7 +92,7 @@ namespace ngraph
Shape m_pads_begin;
Shape m_pads_end;
PadType m_auto_pad;
op::RoundingType m_rounding_type{op::RoundingType::FLOOR};
op::RoundingType m_rounding_type;
private:
bool update_auto_padding(const PartialShape& in_shape,

View File

@ -66,7 +66,9 @@ namespace ngraph
/// Floor(num_dims/2) at the beginning and
/// Ceil(num_dims/2) at the end
/// VALID - No padding
///
/// AUTO - Deprecated; retained only for backward compatibility, do not use in new code
/// NOTSET - Deprecated; retained only for backward compatibility, do not use in new code
enum class PadType
{
EXPLICIT = 0,

View File

@ -68,16 +68,6 @@ op::v1::MaxPool::MaxPool(const Output<Node>& arg,
constructor_validate_and_infer_types();
}
op::v1::MaxPool::MaxPool(const Output<Node>& arg,
const Strides& strides,
const Shape& pads_begin,
const Shape& pads_end,
const Shape& kernel,
op::RoundingType rounding_type)
: v1::MaxPool(arg, strides, pads_begin, pads_end, kernel, rounding_type, op::PadType::EXPLICIT)
{
}
bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor)
{
visitor.on_attribute("strides", m_strides);

View File

@ -303,6 +303,7 @@ set(MULTI_TEST_SRC
backend/lrn.in.cpp
backend/matmul.in.cpp
backend/maximum.in.cpp
backend/max_pool.in.cpp
backend/minimum.in.cpp
backend/multiple_backends.in.cpp
backend/multiple_result.in.cpp

View File

@ -0,0 +1,187 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_floor)
{
    // 1x1x3x3 input, 2x2 kernel, stride 1, no padding, FLOOR rounding -> 1x1x2x2 output.
    Shape in_shape{1, 1, 3, 3};
    Shape out_shape{1, 1, 2, 2};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1};
    const Shape pads_begin{0, 0};
    const Shape pads_end{0, 0};
    const Shape kernel{2, 2};
    const op::RoundingType rounding_type = op::RoundingType::FLOOR;
    const op::PadType pad_type = op::PadType::NOTSET;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    // Each of the four 2x2 windows of {1..9} yields its maximum.
    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8, 9};
    std::vector<float> result{5, 6, 8, 9};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_ceil)
{
    // 1x1x4x4 input, 3x3 kernel, stride 1, no padding, CEIL rounding -> 1x1x2x2 output.
    Shape in_shape{1, 1, 4, 4};
    Shape out_shape{1, 1, 2, 2};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1};
    const Shape pads_begin{0, 0};
    const Shape pads_end{0, 0};
    const Shape kernel{3, 3};
    const op::RoundingType rounding_type = op::RoundingType::CEIL;
    const op::PadType pad_type = op::PadType::NOTSET;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    // Each 3x3 window of {1..16} yields its maximum.
    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
    std::vector<float> result{11, 12, 15, 16};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_pad)
{
    // 1x1x2x2 input, 2x2 kernel, stride 1, pad 1 on every side, CEIL rounding
    // -> 1x1x3x3 output; padded cells do not contribute to any maximum.
    Shape in_shape{1, 1, 2, 2};
    Shape out_shape{1, 1, 3, 3};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1};
    const Shape pads_begin{1, 1};
    const Shape pads_end{1, 1};
    const Shape kernel{2, 2};
    const op::RoundingType rounding_type = op::RoundingType::CEIL;
    const op::PadType pad_type = op::PadType::NOTSET;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    std::vector<float> a{1, 2, 3, 4};
    std::vector<float> result{1, 2, 2, 3, 4, 4, 3, 4, 4};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_same_upper)
{
    // SAME_UPPER auto padding keeps the spatial dims: 1x1x3x3 in -> 1x1x3x3 out.
    // The explicit zero pads below are ignored once auto_pad is SAME_UPPER.
    Shape in_shape{1, 1, 3, 3};
    Shape out_shape{1, 1, 3, 3};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1};
    const Shape pads_begin{0, 0};
    const Shape pads_end{0, 0};
    const Shape kernel{2, 2};
    const op::RoundingType rounding_type = op::RoundingType::CEIL;
    const op::PadType pad_type = op::PadType::SAME_UPPER;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8, 9};
    std::vector<float> result{5, 6, 6, 8, 9, 9, 8, 9, 9};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
{
    // 5D (NCDHW) case: 1x1x2x2x2 input, 1x1x2 kernel pooling along the last axis
    // with VALID padding -> 1x1x2x2x1 output.
    Shape in_shape{1, 1, 2, 2, 2};
    Shape out_shape{1, 1, 2, 2, 1};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1, 1};
    const Shape pads_begin{0, 0, 0};
    const Shape pads_end{0, 0, 0};
    const Shape kernel{1, 1, 2};
    const op::RoundingType rounding_type = op::RoundingType::CEIL;
    const op::PadType pad_type = op::PadType::VALID;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    // Max of each consecutive pair along the innermost dimension.
    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8};
    std::vector<float> result{2, 4, 6, 8};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_same_lower)
{
    // SAME_LOWER auto padding keeps the spatial dims: 1x1x3x3 in -> 1x1x3x3 out.
    // Padding is added at the beginning, so each output equals the window max
    // anchored at the top-left - here the input values themselves.
    Shape in_shape{1, 1, 3, 3};
    Shape out_shape{1, 1, 3, 3};
    // Use plain value objects rather than const references bound to temporaries;
    // the originals relied on lifetime extension, which obscures ownership.
    const Strides strides{1, 1};
    const Shape pads_begin{0, 0};
    const Shape pads_end{0, 0};
    const Shape kernel{2, 2};
    const op::RoundingType rounding_type = op::RoundingType::CEIL;
    const op::PadType pad_type = op::PadType::SAME_LOWER;

    auto A = make_shared<op::Parameter>(element::f32, in_shape);
    auto maxPool = make_shared<op::v1::MaxPool>(
        A, strides, pads_begin, pads_end, kernel, rounding_type, pad_type);
    auto f = make_shared<Function>(maxPool, ParameterVector{A});

    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8, 9};
    std::vector<float> result{1, 2, 3, 4, 5, 6, 7, 8, 9};

    auto test_case = test::TestCase<TestEngine>(f);
    test_case.add_input<float>({a});
    test_case.add_expected_output<float>(out_shape, result);
    test_case.run();
}

View File

@ -99,3 +99,18 @@ TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic)
ASSERT_EQ(mp->get_pads_begin(), (Shape{}));
ASSERT_EQ(mp->get_pads_end(), (Shape{}));
}
TEST(type_prop, max_pool_default_values)
{
    // Construct a MaxPool with only the required arguments and verify the
    // defaulted attributes: rounding_type == FLOOR, auto_pad == EXPLICIT.
    const auto input = make_shared<op::Parameter>(element::f32, PartialShape{1, 3, 32, 32});
    const auto max_pool = make_shared<op::v1::MaxPool>(
        input, Strides{1, 1}, Shape{0, 0}, Shape{0, 0}, Shape{2, 2});

    ASSERT_EQ(max_pool->get_rounding_type(), op::RoundingType::FLOOR);
    ASSERT_EQ(max_pool->get_auto_pad(), op::PadType::EXPLICIT);
}