MaxPool-8 evaluate() (#7363)

This commit is contained in:
Tomasz Dołbniak
2021-09-03 20:22:07 +02:00
committed by GitHub
parent 781dcdf571
commit b86984fb30
5 changed files with 147 additions and 43 deletions

View File

@@ -120,6 +120,9 @@ public:
m_axis = axis;
}
bool has_evaluate() const override;
bool evaluate(const HostTensorVector&, const HostTensorVector&) const override;
private:
Strides m_dilations;
element::Type m_index_element_type{element::i64};

View File

@@ -49,6 +49,13 @@ OV_ITT_DOMAIN(SIMPLE_ngraph_pass);
} \
} break
// Expands to a switch `case` for value element type `a` paired with index element
// type `b`: invokes the two-type evaluate<a, b>(...) overload inside an ITT
// profiling scope and assigns the result to a local `rc` (which the surrounding
// switch statement is expected to declare).
#define NGRAPH_2_TYPES_CASE(region, a, b, ...) \
case element::Type_t::a: { \
OV_SCOPE(ngraph_op, OV_PP_CAT4(region, _, a, b)) { \
rc = evaluate<element::Type_t::a, element::Type_t::b>(__VA_ARGS__); \
} \
} break
#define NGRAPH_COPY_TENSOR(region, a, ...) \
case ov::element::Type_t::a: { \
OV_SCOPE(ngraph_op, OV_PP_CAT3(region, _, a)) { \

View File

@@ -160,6 +160,96 @@ bool op::v1::MaxPool::has_evaluate() const {
// ------------------------------ V8 ------------------------------
namespace maxpool_v8 {
// Bridges the two element-type template parameters to concrete C++ types and
// forwards the MaxPool-8 evaluation (values + indices outputs) to the reference
// implementation. Always reports success; type dispatch happens in the caller.
template <element::Type_t Values, element::Type_t Indices>
inline bool evaluate(const HostTensorPtr& data,
                     const HostTensorPtr& values,
                     const HostTensorPtr& indices,
                     const Shape& out_shape,
                     const Shape& kernel,
                     const Strides& strides,
                     const Strides& dilations,
                     const Shape& pads_begin,
                     const Shape& pads_end,
                     const int64_t axis) {
    using Values_t = typename element_type_traits<Values>::value_type;
    using Indices_t = typename element_type_traits<Indices>::value_type;
    const auto in_ptr = data->get_data_ptr<Values_t>();
    const auto out_values_ptr = values->get_data_ptr<Values_t>();
    const auto out_indices_ptr = indices->get_data_ptr<Indices_t>();
    runtime::reference::max_pool<Values_t, Indices_t>(in_ptr,
                                                      out_values_ptr,
                                                      out_indices_ptr,
                                                      data->get_shape(),
                                                      out_shape,
                                                      kernel,
                                                      strides,
                                                      dilations,
                                                      pads_begin,
                                                      pads_end,
                                                      axis);
    return true;
}
// Runtime type dispatcher for MaxPool-8: selects the evaluate<Values, Indices>
// instantiation matching the data tensor's element type and the indices output's
// element type. Returns false for any unsupported type combination.
bool evaluate_maxpool(const HostTensorPtr& data,
                      const HostTensorPtr& values,
                      const HostTensorPtr& indices,
                      const Shape& out_shape,
                      const Shape& kernel,
                      const Strides& strides,
                      const Strides& dilations,
                      const Shape& pads_begin,
                      const Shape& pads_end,
                      const int64_t axis) {
// Helper generating one `case` per (data_et, index_et) pair; see NGRAPH_2_TYPES_CASE.
#define EVAL_MAX_POOL_8(data_et, index_et) \
    NGRAPH_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \
                        data_et, \
                        index_et, \
                        data, \
                        values, \
                        indices, \
                        out_shape, \
                        kernel, \
                        strides, \
                        dilations, \
                        pads_begin, \
                        pads_end, \
                        axis)
    bool rc = true;
    // Outer switch: indices element type (i32/i64); inner switch: data element type.
    switch (indices->get_element_type()) {
    case element::Type_t::i32: {
        switch (data->get_element_type()) {
            EVAL_MAX_POOL_8(i32, i32);
            EVAL_MAX_POOL_8(i64, i32);
            EVAL_MAX_POOL_8(u32, i32);
            EVAL_MAX_POOL_8(u64, i32);
            EVAL_MAX_POOL_8(f16, i32);
            EVAL_MAX_POOL_8(f32, i32);
        default:
            rc = false;
            break;
        }
    } break;
    case element::Type_t::i64: {
        switch (data->get_element_type()) {
            EVAL_MAX_POOL_8(i32, i64);
            EVAL_MAX_POOL_8(i64, i64);
            EVAL_MAX_POOL_8(u32, i64);
            EVAL_MAX_POOL_8(u64, i64);
            EVAL_MAX_POOL_8(f16, i64);
            EVAL_MAX_POOL_8(f32, i64);
        default:
            rc = false;
            break;
        }
    } break;
    default:
        rc = false;
        break;
    }
// Keep the helper macro local to this function instead of leaking it into the
// rest of the translation unit.
#undef EVAL_MAX_POOL_8
    return rc;
}
} // namespace maxpool_v8
NGRAPH_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase);
op::v8::MaxPool::MaxPool(const Output<Node>& arg,
@@ -223,3 +313,50 @@ shared_ptr<Node> op::v8::MaxPool::clone_with_new_inputs(const OutputVector& new_
m_index_element_type,
m_axis);
}
bool op::v8::MaxPool::has_evaluate() const {
    NGRAPH_OP_SCOPE(v8_MaxPool_has_evaluate);
    // Evaluation is implemented only for the data element types dispatched by
    // maxpool_v8::evaluate_maxpool.
    const auto& input_et = get_input_element_type(0);
    return input_et == ngraph::element::i32 || input_et == ngraph::element::i64 ||
           input_et == ngraph::element::u32 || input_et == ngraph::element::u64 ||
           input_et == ngraph::element::f16 || input_et == ngraph::element::f32;
}
bool op::v8::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
    NGRAPH_OP_SCOPE(v8_MaxPool_evaluate);
    const auto arg_shape = inputs[0]->get_partial_shape();
    // Resolve auto padding against the concrete input shape; the results go into
    // local copies so the op's stored pad attributes stay untouched.
    auto pads_begin_s = get_pads_begin();
    auto pads_end_s = get_pads_end();
    update_auto_padding(arg_shape, get_dilations(), pads_begin_s, pads_end_s);
    CoordinateDiff pads_begin(pads_begin_s.begin(), pads_begin_s.end());
    CoordinateDiff pads_end(pads_end_s.begin(), pads_end_s.end());
    auto out_shape = infer_batched_pooling_forward(this,
                                                   arg_shape,
                                                   pads_begin,
                                                   pads_end,
                                                   get_kernel(),
                                                   get_strides(),
                                                   true,
                                                   get_rounding_type() == op::RoundingType::CEIL,
                                                   get_dilations());
    // Pass the auto-padding-resolved pads (not the raw attributes) so the
    // reference implementation uses the same padding that produced out_shape
    // above; the two differed whenever auto_pad is SAME_UPPER/SAME_LOWER.
    return maxpool_v8::evaluate_maxpool(inputs[0],
                                        outputs[0],
                                        outputs[1],
                                        out_shape.get_shape(),
                                        get_kernel(),
                                        get_strides(),
                                        get_dilations(),
                                        pads_begin_s,
                                        pads_end_s,
                                        get_axis());
}

View File

@@ -50,7 +50,6 @@
#include <ngraph/runtime/reference/lrn.hpp>
#include <ngraph/runtime/reference/lstm_cell.hpp>
#include <ngraph/runtime/reference/matrix_nms.hpp>
#include <ngraph/runtime/reference/max_pool.hpp>
#include <ngraph/runtime/reference/mod.hpp>
#include <ngraph/runtime/reference/multiclass_nms.hpp>
#include <ngraph/runtime/reference/mvn.hpp>
@@ -2923,47 +2922,6 @@ namespace
return true;
}
// Interpreter-backend evaluation of MaxPool-8 for data element type ET.
// Returns false when the op's index element type is neither i32 nor i64.
template <element::Type_t ET>
bool evaluate(const shared_ptr<op::v8::MaxPool>& op,
              const HostTensorVector& outputs,
              const HostTensorVector& inputs)
{
    using T = typename element_type_traits<ET>::value_type;
    // Single call site for the reference kernel; only the indices pointer
    // type differs between the two supported index element types.
    const auto run_pooling = [&](auto* indices_out) {
        runtime::reference::max_pool(inputs[0]->get_data_ptr<const T>(),
                                     outputs[0]->get_data_ptr<T>(),
                                     indices_out,
                                     inputs[0]->get_shape(),
                                     outputs[0]->get_shape(),
                                     op->get_kernel(),
                                     op->get_strides(),
                                     op->get_dilations(),
                                     op->get_pads_begin(),
                                     op->get_pads_end(),
                                     op->get_axis());
    };
    const auto indices_et = op->get_index_element_type();
    if (indices_et == element::i32)
    {
        run_pooling(outputs[1]->get_data_ptr<int32_t>());
    }
    else if (indices_et == element::i64)
    {
        run_pooling(outputs[1]->get_data_ptr<int64_t>());
    }
    else
    {
        return false;
    }
    return true;
}
template <typename T>
bool evaluate_node(std::shared_ptr<Node> node,
const HostTensorVector& outputs,

View File

@@ -101,5 +101,4 @@ NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8)
NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8)
NGRAPH_OP(Gather, op::v8)
NGRAPH_OP(MatrixNms, op::v8)
NGRAPH_OP(MaxPool, op::v8)
NGRAPH_OP(MulticlassNms, op::v8)