Add adaptive pool reference implementation (#6317)

* Add adaptive pool reference implementation

* Add forgotten files

* Fix codestyle

* Disable tests

* Add more tests

* Fix build

* Fix comments

* Remove CoordinateTransform usage from AdaptiveAvgPool

* Fix build

* Refactor AdaptiveMaxPool

* Fix codestyle

* Apply review feedback

* Simplify if statement
This commit is contained in:
Maxim Vafin 2021-07-05 08:36:27 +03:00 committed by GitHub
parent fea3ada931
commit f1af26dbaf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 685 additions and 2 deletions

View File

@ -44,7 +44,7 @@ Output(i,j,k) = max(Input[d_{start}:d_{end}, h_{start}:h_{end}, w_{start}:w_{end
**Outputs**:
* **1**: Output of type *T* and shape `[N, C, H_out]`, `[N, C, H_out, W_out]` or `[N, C, D_out, H_out, W_out]`.
* **2**: Output of type specified by *index_element_type* and same shape as the first output containing indices of elements in the first output. The values of indices are computed as if input was flatten 1-D tensor, so the values are in the range `[0, N * C * H * W * D)`.
* **2**: Output of type specified by *index_element_type* and same shape as the first output containing indices of elements in the first output. The values of indices are computed as if the input spatial dimensions were flattened, so the values are in the range `[0, H * W * D)`.
**Types**

View File

@ -0,0 +1,177 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <cmath>
#include <numeric>
#include <type_traits>
#include <vector>
#include "ngraph/axis_vector.hpp"
#include "ngraph/shape.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
namespace adaptive_pool
{
inline size_t window_start(size_t idx, size_t arg_shape, size_t out_shape)
{
    // First input index covered by output window `idx`:
    // floor(idx * arg_shape / out_shape); size_t division performs the floor.
    const size_t scaled = idx * arg_shape;
    return scaled / out_shape;
}
inline size_t window_end(size_t idx, size_t arg_shape, size_t out_shape)
{
    // One-past-the-last input index of output window `idx`:
    // end = ceil((idx + 1) * arg_shape / out_shape).
    // Exact integer ceiling division: the previous implementation converted
    // to double and used ceil(), which loses precision once the product
    // exceeds 2^53 (and relied on the unqualified global `ceil`).
    const size_t numerator = (idx + 1) * arg_shape;
    return (numerator + out_shape - 1) / out_shape;
}
template <typename T>
T avg_div(const T sum, size_t n)
{
    // Divides a window sum by the number of pooled elements.
    NGRAPH_CHECK(n != 0, "AdaptiveAvgPool elements == 0, must be non-zero");
    // 8-bit integral results are rounded to the nearest integer (computed in
    // float via nearbyint) instead of being truncated by integer division.
    const bool is_8bit_int =
        std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
    if (!is_8bit_int)
    {
        return sum / n;
    }
    return static_cast<T>(std::nearbyint(static_cast<float>(sum) / n));
}
template <typename T>
void adaptive_avg_pool_1d(const T* arg, T* out, size_t h_in, size_t h_out)
{
    // 1D adaptive average pooling: each of the h_out outputs is the mean of
    // its (possibly overlapping) input window [from, to).
    for (size_t idx = 0; idx < h_out; idx++)
    {
        const size_t from = window_start(idx, h_in, h_out);
        const size_t to = window_end(idx, h_in, h_out);
        const T window_sum = std::accumulate(arg + from, arg + to, T{0});
        out[idx] = avg_div(window_sum, to - from);
    }
}
template <typename T>
void adaptive_avg_pool_2d(
    const T* arg, T* out, size_t h_in, size_t h_out, size_t w_in, size_t w_out)
{
    // 2D adaptive average pooling: output cell (i, j) is the mean of the
    // input sub-rectangle [h_start, h_end) x [w_start, w_end).
    for (size_t i = 0; i < h_out; i++)
    {
        const size_t h_start = window_start(i, h_in, h_out);
        const size_t h_end = window_end(i, h_in, h_out);
        for (size_t j = 0; j < w_out; j++)
        {
            const size_t w_start = window_start(j, w_in, w_out);
            const size_t w_end = window_end(j, w_in, w_out);
            // Sum the window row by row.
            T sum{0};
            for (size_t row = h_start; row < h_end; row++)
            {
                const T* row_begin = arg + row * w_in;
                sum = std::accumulate(row_begin + w_start, row_begin + w_end, sum);
            }
            const size_t count = (h_end - h_start) * (w_end - w_start);
            out[i * w_out + j] = avg_div(sum, count);
        }
    }
}
template <typename T>
void adaptive_avg_pool_3d(const T* arg,
                          T* out,
                          size_t d_in,
                          size_t d_out,
                          size_t h_in,
                          size_t h_out,
                          size_t w_in,
                          size_t w_out)
{
    // 3D adaptive average pooling: output cell (i, j, k) is the mean of the
    // input box [d_start, d_end) x [h_start, h_end) x [w_start, w_end).
    for (size_t i = 0; i < d_out; i++)
    {
        const size_t d_start = window_start(i, d_in, d_out);
        const size_t d_end = window_end(i, d_in, d_out);
        for (size_t j = 0; j < h_out; j++)
        {
            const size_t h_start = window_start(j, h_in, h_out);
            const size_t h_end = window_end(j, h_in, h_out);
            for (size_t k = 0; k < w_out; k++)
            {
                const size_t w_start = window_start(k, w_in, w_out);
                const size_t w_end = window_end(k, w_in, w_out);
                // Sum the box one contiguous w-run at a time.
                T sum{0};
                for (size_t d = d_start; d < d_end; d++)
                {
                    for (size_t h = h_start; h < h_end; h++)
                    {
                        const T* row = arg + (d * h_in + h) * w_in;
                        sum = std::accumulate(row + w_start, row + w_end, sum);
                    }
                }
                const size_t count =
                    (d_end - d_start) * (h_end - h_start) * (w_end - w_start);
                out[(i * h_out + j) * w_out + k] = avg_div(sum, count);
            }
        }
    }
}
} // namespace adaptive_pool
template <typename T>
void adaptive_avg_pool(const T* arg,
                       T* out,
                       const Shape& arg_shape,
                       const Shape& out_shape)
{
    // Entry point: loops over batch (dim 0) and channel (dim 1) and
    // dispatches the spatial part to the 1D/2D/3D kernel.
    const size_t rank = arg_shape.size();
    NGRAPH_CHECK(rank == out_shape.size() && 2 < rank && rank < 6,
                 "AdaptiveAvgPool supports only 3D, 4D and 5D input shape");
    // Elements in one input channel and one output channel
    // (product of the spatial dims); ranks are equal per the check above.
    size_t channel_size = 1;
    size_t out_channel_size = 1;
    for (size_t dim = 2; dim < rank; dim++)
    {
        channel_size *= arg_shape[dim];
        out_channel_size *= out_shape[dim];
    }
    const size_t batch_size = arg_shape[1] * channel_size;
    const size_t out_batch_size = arg_shape[1] * out_channel_size;
    for (size_t b = 0; b < arg_shape[0]; b++)
    {
        for (size_t c = 0; c < arg_shape[1]; c++)
        {
            const T* in_ptr = arg + b * batch_size + c * channel_size;
            T* out_ptr = out + b * out_batch_size + c * out_channel_size;
            switch (rank)
            {
            case 3:
                adaptive_pool::adaptive_avg_pool_1d<T>(
                    in_ptr, out_ptr, arg_shape[2], out_shape[2]);
                break;
            case 4:
                adaptive_pool::adaptive_avg_pool_2d<T>(in_ptr,
                                                       out_ptr,
                                                       arg_shape[2],
                                                       out_shape[2],
                                                       arg_shape[3],
                                                       out_shape[3]);
                break;
            case 5:
                adaptive_pool::adaptive_avg_pool_3d<T>(in_ptr,
                                                       out_ptr,
                                                       arg_shape[2],
                                                       out_shape[2],
                                                       arg_shape[3],
                                                       out_shape[3],
                                                       arg_shape[4],
                                                       out_shape[4]);
                break;
            default: break; // unreachable: rank is 3, 4 or 5 per the check
            }
        }
    }
}
} // namespace reference
} // namespace runtime
} // namespace ngraph

View File

@ -0,0 +1,170 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>
#include "ngraph/axis_vector.hpp"
#include "ngraph/shape.hpp"
namespace ngraph
{
namespace runtime
{
namespace reference
{
template <typename T>
void adaptive_max_pool_1d(
    const T* arg, T* out, int64_t* indices, size_t h_in, size_t h_out)
{
    // 1D adaptive max pooling: each output gets the maximum of its input
    // window, and `indices` gets the offset of that maximum within `arg`.
    for (size_t idx = 0; idx < h_out; idx++)
    {
        const T* win_begin = arg + adaptive_pool::window_start(idx, h_in, h_out);
        const T* win_end = arg + adaptive_pool::window_end(idx, h_in, h_out);
        NGRAPH_CHECK(win_end - win_begin != 0, "AdaptiveMaxPool elements == 0, must be non-zero");
        const T* max_it = std::max_element(win_begin, win_end);
        out[idx] = *max_it;
        indices[idx] = max_it - arg;
    }
}
template <typename T>
void adaptive_max_pool_2d(const T* arg,
                          T* out,
                          int64_t* indices,
                          size_t h_in,
                          size_t h_out,
                          size_t w_in,
                          size_t w_out)
{
    // 2D adaptive max pooling: output cell (i, j) is the maximum of the input
    // sub-rectangle [h_start, h_end) x [w_start, w_end); `indices` receives
    // the flattened offset of that maximum within `arg`.
    for (size_t i = 0; i < h_out; i++)
    {
        const size_t h_start = adaptive_pool::window_start(i, h_in, h_out);
        const size_t h_end = adaptive_pool::window_end(i, h_in, h_out);
        for (size_t j = 0; j < w_out; j++)
        {
            const size_t w_start = adaptive_pool::window_start(j, w_in, w_out);
            const size_t w_end = adaptive_pool::window_end(j, w_in, w_out);
            NGRAPH_CHECK((w_end - w_start) * (h_end - h_start) != 0,
                         "AdaptiveMaxPool elements == 0, must be non-zero");
            // First element of the window is the initial best candidate;
            // strict `>` keeps the earliest position on ties.
            const T* best = arg + h_start * w_in + w_start;
            for (size_t row = h_start; row < h_end; row++)
            {
                const T* row_begin = arg + row * w_in;
                const T* row_best =
                    std::max_element(row_begin + w_start, row_begin + w_end);
                if (*row_best > *best)
                {
                    best = row_best;
                }
            }
            out[i * w_out + j] = *best;
            indices[i * w_out + j] = best - arg;
        }
    }
}
template <typename T>
void adaptive_max_pool_3d(const T* arg,
                          T* out,
                          int64_t* indices,
                          size_t d_in,
                          size_t d_out,
                          size_t h_in,
                          size_t h_out,
                          size_t w_in,
                          size_t w_out)
{
    // 3D adaptive max pooling: output cell (i, j, k) is the maximum of the
    // input box [d_start, d_end) x [h_start, h_end) x [w_start, w_end);
    // `indices` receives the flattened offset of that maximum within `arg`.
    for (size_t i = 0; i < d_out; i++)
    {
        const size_t d_start = adaptive_pool::window_start(i, d_in, d_out);
        const size_t d_end = adaptive_pool::window_end(i, d_in, d_out);
        for (size_t j = 0; j < h_out; j++)
        {
            const size_t h_start = adaptive_pool::window_start(j, h_in, h_out);
            const size_t h_end = adaptive_pool::window_end(j, h_in, h_out);
            for (size_t k = 0; k < w_out; k++)
            {
                const size_t w_start = adaptive_pool::window_start(k, w_in, w_out);
                const size_t w_end = adaptive_pool::window_end(k, w_in, w_out);
                NGRAPH_CHECK((w_end - w_start) * (h_end - h_start) != 0,
                             "AdaptiveMaxPool elements == 0, must be non-zero");
                // First element of the box is the initial best candidate;
                // strict `>` keeps the earliest position on ties.
                const T* best = arg + d_start * h_in * w_in + h_start * w_in + w_start;
                for (size_t d = d_start; d < d_end; d++)
                {
                    for (size_t h = h_start; h < h_end; h++)
                    {
                        const T* row = arg + (d * h_in + h) * w_in;
                        const T* row_best =
                            std::max_element(row + w_start, row + w_end);
                        if (*row_best > *best)
                        {
                            best = row_best;
                        }
                    }
                }
                out[(i * h_out + j) * w_out + k] = *best;
                indices[(i * h_out + j) * w_out + k] = best - arg;
            }
        }
    }
}
template <typename T>
void adaptive_max_pool(const T* arg,
                       T* out,
                       int64_t* selected_indices,
                       const Shape& arg_shape,
                       const Shape& out_shape)
{
    // Entry point: loops over batch (dim 0) and channel (dim 1) and
    // dispatches the spatial part to the 1D/2D/3D kernel. For every output
    // element, `selected_indices` receives the flattened offset of its
    // maximum within the corresponding (batch, channel) slice.
    // Bug fix: the diagnostic previously named "AdaptiveAvgPool".
    NGRAPH_CHECK(arg_shape.size() == out_shape.size() && 2 < arg_shape.size() &&
                     arg_shape.size() < 6,
                 "AdaptiveMaxPool supports only 3D, 4D and 5D input shape");
    // Elements per input channel (product of spatial dims).
    size_t channel_size = 1;
    for (size_t i = 2; i < arg_shape.size(); i++)
    {
        channel_size *= arg_shape[i];
    }
    size_t batch_size = arg_shape[1] * channel_size;
    // Elements per output channel.
    size_t out_channel_size = 1;
    for (size_t i = 2; i < out_shape.size(); i++)
    {
        out_channel_size *= out_shape[i];
    }
    size_t out_batch_size = arg_shape[1] * out_channel_size;
    for (size_t b = 0; b < arg_shape[0]; b++)
    {
        for (size_t c = 0; c < arg_shape[1]; c++)
        {
            auto arg_pos = arg + b * batch_size + c * channel_size;
            auto out_pos = out + b * out_batch_size + c * out_channel_size;
            // Indices output has the same layout as the values output.
            auto sel_ind_pos =
                selected_indices + b * out_batch_size + c * out_channel_size;
            if (arg_shape.size() == 3)
            {
                adaptive_max_pool_1d<T>(
                    arg_pos, out_pos, sel_ind_pos, arg_shape[2], out_shape[2]);
            }
            else if (arg_shape.size() == 4)
            {
                adaptive_max_pool_2d<T>(arg_pos,
                                        out_pos,
                                        sel_ind_pos,
                                        arg_shape[2],
                                        out_shape[2],
                                        arg_shape[3],
                                        out_shape[3]);
            }
            else if (arg_shape.size() == 5)
            {
                adaptive_max_pool_3d<T>(arg_pos,
                                        out_pos,
                                        sel_ind_pos,
                                        arg_shape[2],
                                        out_shape[2],
                                        arg_shape[3],
                                        out_shape[3],
                                        arg_shape[4],
                                        out_shape[4]);
            }
        }
    }
}
} // namespace reference
} // namespace runtime
} // namespace ngraph

View File

@ -355,6 +355,8 @@ set(MULTI_TEST_SRC
backend/abs.in.cpp
backend/acos.in.cpp
backend/acosh.in.cpp
backend/adaptive_avg_pool.in.cpp
backend/adaptive_max_pool.in.cpp
backend/add.in.cpp
backend/aliased_output.in.cpp
backend/api.in.cpp

View File

@ -0,0 +1,131 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// AdaptiveAvgPool on a 3D input: spatial size 7 pooled down to 3 per
// (batch, channel) slice. Windows are [floor(i*7/3), ceil((i+1)*7/3)),
// so adjacent windows may overlap.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_1d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 7});
// Requested output spatial shape: {3}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{1}, {3});
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape);
auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data});
// One row per (batch, channel) slice of 7 elements.
std::vector<float> inputs{ 0, 4, 1, 3, -2, -5, -2,
-2, 1, -3, 1, -3, -4, 0,
-2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1,
2, 0, -4, -5, -2, -2, -3,
2, 3, 1, -5, 2, -4, -2};
// Per-window means, e.g. first row: avg(0,4,1), avg(1,3,-2), avg(-2,-5,-2).
std::vector<float> expected_result{ 1.66666663, 0.66666669, -3. ,
-1.33333337, -1.66666663, -2.33333325,
-0.66666669, 0. , -0.33333334,
0. , 1.33333337, -2. ,
-0.66666669, -3.66666675, -2.33333325,
2. , -0.66666669, -1.33333337};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{2, 3, 7}, inputs);
test_case.add_expected_output<float>(Shape{2, 3, 3}, expected_result);
test_case.run();
}
// AdaptiveAvgPool on a 4D input: 7x10 spatial plane pooled down to 3x3
// for each of the 3 channels.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_2d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 7, 10});
// Requested output spatial shape: {3, 3}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {3, 3});
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape);
auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data});
// Three 7x10 channel planes, one input row per line.
std::vector<float> inputs{-2, -3, -4, 3, -5, 4, 0, -4, -2, -4,
-5, 0, -3, 0, -2, 0, 0, -5, -4, -1,
3, -1, 0, -1, 0, -2, 0, 4, 1, 4,
0, -1, -4, 2, -2, -5, -1, -1, -2, 1,
2, -2, -1, 2, 0, -1, 0, -5, 4, 4,
3, 0, -4, -4, -4, -2, 0, 1, -2, -1,
4, -2, -4, 1, -1, -3, -4, -1, 1, -4,
-2, -4, -5, 0, -4, 3, 4, -5, -4, -2,
0, 2, -4, -3, 3, -1, 1, -4, -5, 4,
2, -5, 2, -3, 0, 4, 3, 3, 1, 2,
-1, -4, 1, -3, -3, -2, 3, 4, -2, -5,
1, 4, 4, -2, 2, 1, -5, -2, -5, 1,
1, -2, -3, -3, -1, -5, 1, -3, -5, -3,
-4, -1, 4, -3, 4, -1, 4, 3, 1, 4,
-2, -4, -4, 4, -3, 4, 2, -3, -2, 4,
-3, 0, 1, -4, 4, 4, 0, 3, -1, 3,
3, -5, 0, 3, -3, 1, -2, 4, -5, -5,
1, 0, -1, 0, -3, -2, 0, -3, 3, -2,
-2, 0, -3, 4, -1, 2, -2, 2, -3, -1,
-4, -2, 0, 2, 0, 2, 0, -3, 4, 3,
-5, -3, -5, 1, -5, -3, -5, 4, -3, 3};
// Per-window means of the 2D windows (3x3 output per channel).
std::vector<float> expected_result{-1.08333337, -0.25000000, -0.91666669,
-0.08333334, -0.66666669, 0.75000000,
-0.41666666, -1.33333337, -0.58333331,
-1.66666663, 0.58333331, -0.16666667,
-0.33333334, -0.41666666, -0.16666667,
-0.33333334, -0.66666669, -0.75000000,
-0.91666669, 0.83333331, -0.16666667,
0. , -0.25000000, -1.16666663,
-1.41666663, -0.41666666, -0.08333334};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{1, 3, 7, 10}, inputs);
test_case.add_expected_output<float>(Shape{1, 3, 3, 3}, expected_result);
test_case.run();
}
// AdaptiveAvgPool on a 5D input: 3x3x3 spatial volume pooled down to 2x2x2
// for each (batch, channel) pair.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_avg_pool_3d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3, 3});
// Requested output spatial shape: {2, 2, 2}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {2, 2, 2});
auto adaptive_pool = make_shared<op::v8::AdaptiveAvgPool>(data, output_shape);
auto fun = make_shared<Function>(OutputVector{adaptive_pool}, ParameterVector{data});
// Four 3x3x3 volumes (2 batches x 2 channels), one input row per line.
std::vector<float> inputs{-5, 1, -3, -4, 4, -4, 3, -3, -1,
0, 0, -2, -4, 2, 0, -4, -5, -2,
-4, -4, 0, -2, 3, -3, 4, -1, -4,
-1, -1, -5, 4, -1, -2, -3, 0, 4,
-1, -5, -4, 1, 1, 4, -5, -5, -5,
4, -3, -3, -3, 4, 0, -3, -5, 1,
4, 2, 1, -5, -5, 1, 0, -4, -1,
2, -4, -2, 4, 3, 1, -3, -3, -2,
-4, -3, -3, 3, -1, 1, 2, 2, -4,
-5, -4, 1, 3, -4, -1, 2, 4, -5,
0, 1, -2, 0, 0, -2, 3, -2, -5,
-3, -5, -2, -1, 3, -2, 4, 3, -3};
// Means of the 2x2x2 adaptive boxes (each box averages 8 elements).
std::vector<float> expected_result{-0.750, -0.250, -1.375, -1.125,
-1.125, -0.500, -0.875, -1.250,
-0.375, -1.625, -1. , -0.500,
-0.250, -0.750, -1.875, -0.625,
0.125, -0.375, -1.625, -1.250,
0. , -1. , 0.875, -0.375,
-1.125, -1.375, 0.750, -1.875,
-0.625, -1.125, 1.250, -1.};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{2, 2, 3, 3, 3}, inputs);
test_case.add_expected_output<float>(Shape{2, 2, 2, 2, 2}, expected_result);
test_case.run();
}

View File

@ -0,0 +1,161 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// AdaptiveMaxPool on a 3D input: spatial size 7 pooled down to 3. Both
// outputs are checked: the maxima and the in-slice indices of the maxima.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_max_pool_1d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 7});
// Requested output spatial shape: {3}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{1}, {3});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, output_shape);
// Function exposes both op outputs (values and indices).
auto fun = make_shared<Function>(adaptive_pool->outputs(), ParameterVector{data});
// One row per (batch, channel) slice of 7 elements.
std::vector<float> inputs{ 0, 4, 1, 3, -2, -5, -2,
-2, 1, -3, 1, -3, -4, 0,
-2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1,
2, 0, -4, -5, -2, -2, -3,
2, 3, 1, -5, 2, -4, -2};
// Per-window maxima.
std::vector<float> expected_result{ 4, 3, -2,
1, 1, 0,
1, 3, 3,
3, 4, 1,
2, -2, -2,
3, 2, 2};
// Indices of the maxima, flattened within each spatial slice of length 7.
std::vector<int64_t> expected_indices{1, 3, 4,
1, 3, 6,
1, 4, 4,
2, 3, 6,
0, 4, 4,
1, 4, 4};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{2, 3, 7}, inputs);
test_case.add_expected_output<float>(Shape{2, 3, 3}, expected_result);
test_case.add_expected_output<int64_t>(Shape{2, 3, 3}, expected_indices);
test_case.run();
}
// AdaptiveMaxPool on a 4D input: 7x10 spatial plane pooled down to 3x3 per
// channel; checks both maxima and their flattened in-plane indices.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_max_pool_2d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 7, 10});
// Requested output spatial shape: {3, 3}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {3, 3});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, output_shape);
// Function exposes both op outputs (values and indices).
auto fun = make_shared<Function>(adaptive_pool->outputs(), ParameterVector{data});
// Three 7x10 channel planes, one input row per line.
std::vector<float> inputs{ 0, -2, -5, -5, 2, 3, 2, -3, 1, -2,
-4, -1, -1, -1, 2, -4, 3, -5, -1, -1,
1, 2, 4, -2, -3, -2, 0, -5, 2, -4,
-1, -4, 4, 2, 1, -2, 2, -3, 0, 1,
-3, 3, -1, 4, 0, 2, 0, 3, 4, -4,
1, 4, -1, -5, -2, 4, -3, 3, 2, 1,
0, 4, 2, -5, 2, -5, -2, -1, 4, 2,
0, 4, -2, 0, -5, -3, 4, -4, -2, -2,
2, 1, 4, 3, 2, -5, -4, -4, 0, 1,
4, -4, -3, 3, 3, 4, -2, -3, -4, -2,
0, 1, -1, 3, -2, 2, 0, -3, -1, -1,
0, 0, 2, 2, -2, 1, -3, 1, 2, 4,
3, -5, -4, 1, -4, 2, 0, -2, -5, 2,
-3, -2, -3, -4, 2, -2, -4, 2, -4, -3,
1, -5, -1, -5, 2, 1, 3, 4, 3, 0,
-5, 4, -3, -4, -1, 2, -4, 2, 0, -5,
-3, 0, 2, -3, -5, 3, -2, -1, -5, -4,
-5, 0, -5, -1, -3, 3, 3, -4, -3, -4,
-5, 4, -1, 1, -1, -4, 1, -3, -4, -1,
-2, -3, -5, 2, 2, -5, 1, 1, -5, -4,
0, 2, 4, 2, 0, 2, 4, 0, -5, 2};
// Per-window maxima (3x3 output per channel).
std::vector<float> expected_result{4, 3, 3,
4, 4, 4,
4, 4, 4,
4, 4, 4,
4, 4, 4,
3, 2, 4,
4, 3, 4,
4, 3, 3,
4, 4, 4};
// Indices of the maxima, flattened within each 7x10 plane.
std::vector<int64_t> expected_indices{22, 5 , 16,
22, 43, 48,
43, 43, 48,
1 , 6 , 6 ,
20, 25, 49,
50, 43, 49,
11, 6 , 7 ,
41, 25, 36,
41, 66, 66};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{1, 3, 7, 10}, inputs);
test_case.add_expected_output<float>(Shape{1, 3, 3, 3}, expected_result);
test_case.add_expected_output<int64_t>(Shape{1, 3, 3, 3}, expected_indices);
test_case.run();
}
// AdaptiveMaxPool on a 5D input: 3x3x3 volume pooled down to 2x2x2 per
// (batch, channel) pair; checks both maxima and their in-volume indices.
NGRAPH_TEST(${BACKEND_NAME}, adaptive_max_pool_3d)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2, 3, 3, 3});
// Requested output spatial shape: {2, 2, 2}.
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {2, 2, 2});
auto adaptive_pool = make_shared<op::v8::AdaptiveMaxPool>(data, output_shape);
// Function exposes both op outputs (values and indices).
auto fun = make_shared<Function>(adaptive_pool->outputs(), ParameterVector{data});
// Four 3x3x3 volumes (2 batches x 2 channels), one input row per line.
std::vector<float> inputs{-5, 1, -3, -4, 4, -4, 3, -3, -1,
0, 0, -2, -4, 2, 0, -4, -5, -2,
-4, -4, 0, -2, 3, -3, 4, -1, -4,
-1, -1, -5, 4, -1, -2, -3, 0, 4,
-1, -5, -4, 1, 1, 4, -5, -5, -5,
4, -3, -3, -3, 4, 0, -3, -5, 1,
4, 2, 1, -5, -5, 1, 0, -4, -1,
2, -4, -2, 4, 3, 1, -3, -3, -2,
-4, -3, -3, 3, -1, 1, 2, 2, -4,
-5, -4, 1, 3, -4, -1, 2, 4, -5,
0, 1, -2, 0, 0, -2, 3, -2, -5,
-3, -5, -2, -1, 3, -2, 4, 3, -3};
// Maxima of the 2x2x2 adaptive boxes.
std::vector<float> expected_result{4, 4, 4, 4,
3, 3, 4, 3,
4, 4, 4, 4,
4, 4, 4, 4,
4, 3, 4, 3,
4, 3, 4, 3,
3, 1, 4, 4,
3, 3, 4, 3};
// Indices of the maxima, flattened within each 3x3x3 volume.
std::vector<int64_t> expected_indices{4 , 4 , 4 , 4 ,
22, 22, 24, 22,
3 , 14, 3 , 8 ,
18, 14, 22, 14,
0 , 13, 12, 13,
12, 13, 12, 13,
3 , 2 , 7 , 7 ,
22, 22, 24, 22};
auto test_case = test::TestCase<TestEngine>(fun);
test_case.add_input<float>(Shape{2, 2, 3, 3, 3}, inputs);
test_case.add_expected_output<float>(Shape{2, 2, 2, 2, 2}, expected_result);
test_case.add_expected_output<int64_t>(Shape{2, 2, 2, 2, 2}, expected_indices);
test_case.run();
}

View File

@ -1623,3 +1623,11 @@ IE_CPU.deformable_convolution_2D_integral_offsets_groups_and_deforgroups
IE_CPU.deformable_convolution_2D_real_offsets_groups_basic
IE_CPU.deformable_convolution_2D_real_offsets_groups_complex
IE_CPU.deformable_convolution_2D_real_offsets_groups_and_deforgroups
# No plugin support for AdaptiveAvgPool and AdaptiveMaxPool
adaptive_avg_pool_1d
adaptive_avg_pool_2d
adaptive_avg_pool_3d
adaptive_max_pool_1d
adaptive_max_pool_2d
adaptive_max_pool_3d

View File

@ -8,6 +8,8 @@
#include "ngraph/ops.hpp"
#include <ngraph/runtime/reference/abs.hpp>
#include <ngraph/runtime/reference/adaptive_avg_pool.hpp>
#include <ngraph/runtime/reference/adaptive_max_pool.hpp>
#include <ngraph/runtime/reference/avg_pool.hpp>
#include <ngraph/runtime/reference/batch_norm.hpp>
#include <ngraph/runtime/reference/binary_convolution.hpp>
@ -2539,6 +2541,33 @@ namespace
return true;
}
// Interpreter handler for AdaptiveAvgPool (opset8): forwards the input data
// and the op's statically inferred output shape to the reference kernel.
// ET selects the element type; T is its C++ counterpart.
template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v8::AdaptiveAvgPool>& op,
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
using T = typename element_type_traits<ET>::value_type;
runtime::reference::adaptive_avg_pool(inputs[0]->get_data_ptr<T>(),
outputs[0]->get_data_ptr<T>(),
inputs[0]->get_shape(),
op->get_output_shape(0));
return true;
}
// Interpreter handler for AdaptiveMaxPool (opset8): forwards input data to
// the reference kernel. Output 0 receives the pooled values (type T);
// output 1 receives the indices and is read as int64_t.
template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v8::AdaptiveMaxPool>& op,
const HostTensorVector& outputs,
const HostTensorVector& inputs)
{
using T = typename element_type_traits<ET>::value_type;
runtime::reference::adaptive_max_pool(inputs[0]->get_data_ptr<T>(),
outputs[0]->get_data_ptr<T>(),
outputs[1]->get_data_ptr<int64_t>(),
inputs[0]->get_shape(),
op->get_output_shape(0));
return true;
}
template <typename T>
bool evaluate_node(std::shared_ptr<Node> node,
const HostTensorVector& outputs,
@ -2556,7 +2585,9 @@ namespace
for (size_t i = 1; i < node->outputs().size(); i++)
{
if ((is_type<op::v5::NonMaxSuppression>(node) ||
is_type<op::v6::ExperimentalDetectronDetectionOutput>(node)) && i == 1)
is_type<op::v6::ExperimentalDetectronDetectionOutput>(node) ||
is_type<op::v8::AdaptiveMaxPool>(node)) &&
i == 1)
{
continue;
}

View File

@ -96,3 +96,6 @@ NGRAPH_OP(DFT, op::v7)
NGRAPH_OP(Einsum, op::v7)
NGRAPH_OP(IDFT, op::v7)
NGRAPH_OP(Roll, ngraph::op::v7)
NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8)
NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8)