[FrontEnd]enable pdpd ops conversion part3 (#6636)

* [FrontEnd]enable pdpd ops conversion part3

* Add adaptive pool2d op conversion (#1)

* param support tensor (#2)

* add missing sync_batch_norm

* Update pow.cpp

* deal empty axis (#5)

* deal empty axis

* apply review comments

* fix code style

* fix code style

* change shape to i32

* fix code in shape

* fix code style

* fix paddle code style

* remove redundant ops

* fix maxAdaptivePool

* fix expand_v2

* remove redundant code

Co-authored-by: Mang Guo <mang.guo@intel.com>
Co-authored-by: Luo Cheng <cheng.luo@intel.com>
This commit is contained in:
Zhang Yi 2021-08-04 16:04:19 +08:00 committed by GitHub
parent 6f23458534
commit 0aa6b07628
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 1358 additions and 50 deletions

View File

@ -61,7 +61,11 @@ namespace ngraph
fixed_shape_node, input_shape_node, false);
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::Tile>(x, repeated_node)}, {"Out"});
{std::make_shared<ngraph::opset6::Tile>(
x,
std::make_shared<ngraph::opset6::Convert>(repeated_node,
element::i64))},
{"Out"});
}
} // namespace op

View File

@ -0,0 +1,67 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts the PDPD "mul" operator to ngraph MatMul.
// Y must be a rank-2 matrix. If X has rank > 2, it is first flattened to a
// 2-D matrix at runtime: the leading 'x_num_col_dims' dims collapse into the
// row count and the remaining dims into the column count.
NamedOutputs mul(const NodeContext& node)
{
auto x = node.get_ng_input("X");
auto y = node.get_ng_input("Y");
// NOTE(review): the validation messages say "matmul" although the op is
// "mul" — presumably intentional since mul lowers to MatMul; confirm.
PDPD_OP_VALIDATION_CHECK(node,
x.get_partial_shape().rank().is_static(),
"matmul: X rank must be static!");
int64_t x_rank = x.get_partial_shape().rank().get_length();
PDPD_OP_VALIDATION_CHECK(node,
y.get_partial_shape().rank().is_static() &&
y.get_partial_shape().rank().get_length() == 2,
"matmul: Y rank must be static, and 2!");
if (x_rank > 2)
{
// Split the runtime shape of X into
// [first x_num_col_dims dims | remaining dims].
auto shape = std::make_shared<ngraph::opset6::ShapeOf>(x);
int64_t x_num_col_dims = node.get_attribute<int32_t>("x_num_col_dims");
auto axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0});
auto split_lengths = ngraph::opset6::Constant::create(
ngraph::element::i64, {2}, {x_num_col_dims, x_rank - x_num_col_dims});
auto split = std::make_shared<ngraph::opset6::VariadicSplit>(
shape, axis, split_lengths);
// Row count = product of the leading dims, reshaped to a 1-element
// vector so it can be concatenated below.
auto f_dim_red_axis =
ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0});
auto first_dim_reduce = std::make_shared<ngraph::opset6::ReduceProd>(
split->output(0), f_dim_red_axis);
auto f_dim_shape =
ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1});
auto first_dim = std::make_shared<ngraph::opset6::Reshape>(
first_dim_reduce, f_dim_shape, false);
// Column count = product of the trailing dims, likewise a 1-element
// vector.
auto s_dim_red_axis =
ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0});
auto second_dim_reduce = std::make_shared<ngraph::opset6::ReduceProd>(
split->output(1), s_dim_red_axis);
auto s_dim_shape =
ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1});
auto second_dim = std::make_shared<ngraph::opset6::Reshape>(
second_dim_reduce, s_dim_shape, false);
// Flatten X to {rows, cols} and multiply by Y.
auto out_shape = std::make_shared<ngraph::opset6::Concat>(
ngraph::NodeVector{first_dim, second_dim}, 0);
auto x_reshaped =
std::make_shared<ngraph::opset6::Reshape>(x, out_shape, false);
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::MatMul>(x_reshaped, y)}, {"Out"});
}
// Rank-2 X multiplies Y directly.
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::MatMul>(x, y)}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,117 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "pad3d" to ngraph Pad.
// Supported modes: "constant" (pad with 'value'), "reflect", "replicate"
// (EDGE). The 6-element PDPD paddings [left, right, top, down, front, back]
// are scattered into rank-5 pads_begin/pads_end according to 'data_format'
// (NCDHW or NDHWC); batch and channel dims are never padded.
NamedOutputs pad3d(const NodeContext& node)
{
auto data = node.get_ng_input("X");
auto mode = node.get_attribute<std::string>("mode");
auto value = node.get_attribute<float>("value", 0.0);
auto data_format = node.get_attribute<std::string>("data_format");
auto paddings = std::vector<int32_t>(6, 0);
// padding of type int feature only supported by PaddlePaddle 'develop'
// version(>=2.1.0)
if (node.has_attribute<std::vector<int32_t>>("paddings"))
{
auto paddings_vector = node.get_attribute<std::vector<int32_t>>("paddings");
PDPD_OP_VALIDATION_CHECK(node,
paddings_vector.size() == 6,
"paddings Params size should be 6 in pad3d!");
paddings = paddings_vector;
}
else if (node.has_attribute<int32_t>("paddings"))
{
// Scalar attribute form: same padding on all six sides.
auto padding_int = node.get_attribute<int32_t>("paddings");
for (int i = 0; i < 6; i++)
paddings[i] = padding_int;
}
else
{
throw ngraph::ngraph_error("Unsupported paddings attribute!");
}
auto pads_begin = std::vector<int32_t>(5, 0);
auto pads_end = std::vector<int32_t>(5, 0);
Output<ngraph::Node> values;
Output<ngraph::Node> padding_begin;
Output<ngraph::Node> padding_end;
ngraph::op::PadMode pad_mode;
// TODO Support Circular mode in #55704
if (mode == "constant")
{
// Only constant mode needs a fill value (scalar f32).
pad_mode = ngraph::op::PadMode::CONSTANT;
values = ngraph::opset6::Constant::create(
element::f32, ngraph::Shape{}, {value});
}
else if (mode == "reflect")
{
pad_mode = ngraph::op::PadMode::REFLECT;
}
else if (mode == "replicate")
{
// PDPD "replicate" corresponds to ngraph EDGE padding.
pad_mode = ngraph::op::PadMode::EDGE;
}
else
{
throw ngraph::ngraph_error("Unsupported 3d paddings mode: [" + mode + "]");
}
// Map [left, right, top, down, front, back] onto the W, H, D axes of the
// rank-5 input, which sit at different positions per layout.
if (data_format == "NCDHW")
{
pads_begin[4] = paddings[0]; // left
pads_end[4] = paddings[1]; // right
pads_begin[3] = paddings[2]; // top
pads_end[3] = paddings[3]; // down
pads_begin[2] = paddings[4]; // front
pads_end[2] = paddings[5]; // back
}
else if (data_format == "NDHWC")
{
pads_begin[3] = paddings[0]; // left
pads_end[3] = paddings[1]; // right
pads_begin[2] = paddings[2]; // top
pads_end[2] = paddings[3]; // down
pads_begin[1] = paddings[4]; // front
pads_end[1] = paddings[5]; // back
}
else
{
throw ngraph::ngraph_error("Unsupported 3d paddings data_format: [" +
data_format + "]");
}
padding_begin = ngraph::opset6::Constant::create(
element::i32, ngraph::Shape{pads_begin.size()}, pads_begin);
padding_end = ngraph::opset6::Constant::create(
element::i32, ngraph::Shape{pads_end.size()}, pads_end);
// Constant mode uses the 5-argument Pad overload (with fill value).
if (mode == "constant")
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::Pad>(
data, padding_begin, padding_end, values, pad_mode)},
{"Out"});
else
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::Pad>(
data, padding_begin, padding_end, pad_mode)},
{"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,249 @@
//*****************************************************************************
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
//*****************************************************************************
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/opsets/opset8.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// helper func - get pad_begin and pad_end
// helper func - get pad_begin and pad_end
// Resolves pooling padding for pool2d: sets 'auto_pad' from the optional
// 'padding_algorithm' attribute (SAME -> SAME_UPPER, VALID, EXPLICIT;
// absent -> EXPLICIT) and fills pad_begin/pad_end from the 'paddings'
// attribute, which may hold 2 values (symmetric H/W) or 4 values
// (top/bottom/left/right). Throws for any other size or algorithm.
static void get_paddings(const NodeContext& node,
ngraph::Shape& pad_begin,
ngraph::Shape& pad_end,
ngraph::op::PadType& auto_pad)
{
if (node.has_attribute<std::string>("padding_algorithm"))
{
auto pad_algo = node.get_attribute<std::string>("padding_algorithm");
if (pad_algo == "SAME")
{
auto_pad = ngraph::op::PadType::SAME_UPPER;
}
else if (pad_algo == "VALID")
{
auto_pad = ngraph::op::PadType::VALID;
}
else if (pad_algo == "EXPLICIT")
{
auto_pad = ngraph::op::PadType::EXPLICIT;
}
else
{
throw std::runtime_error("Unsupported pooling padding_algorithm " +
pad_algo);
}
}
else
{
// adaptive_maxpool with no such attr.
auto_pad = ngraph::op::PadType::EXPLICIT;
}
/*If pool padding size is a tuple or list, it could be in three forms:
[pad_height, pad_width] or [pad_height_top, pad_height_bottom, pad_width_left,
pad_width_right], and when data_format is NCHW, pool_padding can be in the
form [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left,
pad_width_right]]. when data_format is NHWC, pool_padding can be in the form
[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right],
[0,0]]. Otherwise, the pool padding size will be a square of an int.*/
auto paddings = node.get_attribute<std::vector<int32_t>>("paddings");
// Default is empty for 'adaptive max pooling'
// NOTE(review): data_format is fetched but not used in this helper —
// presumably a placeholder for the NHWC work below; confirm.
auto data_format = node.get_attribute<std::string>("data_format", {});
// TODO: need to support NHWC input #55483
switch (paddings.size())
{
case 2:
// Symmetric form: same pad before and after each spatial dim.
pad_begin = Shape{static_cast<uint64_t>(paddings[0]),
static_cast<uint64_t>(paddings[1])};
pad_end = pad_begin;
break;
case 4:
// Explicit form: [top, bottom, left, right].
pad_begin = Shape{static_cast<uint64_t>(paddings[0]),
static_cast<uint64_t>(paddings[2])};
pad_end = Shape{static_cast<uint64_t>(paddings[1]),
static_cast<uint64_t>(paddings[3])};
break;
default:
throw std::runtime_error("Unsupported pooling paddings " +
std::to_string(paddings.size()));
}
}
// Converts PDPD "pool2d" / adaptive pooling to ngraph.
// Dispatch:
//  * global pooling (or adaptive pooling to 1x1)  -> ReduceMax / ReduceMean
//    over the two trailing spatial axes (keep_dims preserves layout);
//  * adaptive pooling                             -> AdaptiveMaxPool (Out +
//    Mask outputs) / AdaptiveAvgPool;
//  * everything else                              -> MaxPool / AvgPool.
// Fixes vs. previous revision: the static-spatial-dim checks now index
// input_shape at input_rank-2 / input_rank-1 (consistent with the rest of
// the function and safe for any rank passing the rank assert) instead of
// hard-coded 2 / 3; an unused re-read of the "paddings" attribute was
// removed.
NamedOutputs pool2d(const NodeContext& node)
{
    auto data = node.get_ng_input("X");
    auto pooling_type = node.get_attribute<std::string>("pooling_type", {});
    auto global_pooling = node.get_attribute<bool>("global_pooling");
    auto adaptive = node.get_attribute<bool>("adaptive");
    auto kernel_shape = node.get_attribute<std::vector<int32_t>>("ksize");
    auto rounding_type = node.get_attribute<bool>("ceil_mode", false)
                             ? ngraph::op::RoundingType::CEIL
                             : ngraph::op::RoundingType::FLOOR;
    // 'pooling_type' may be absent (e.g. adaptive max pooling); default to max.
    if (pooling_type.empty())
    {
        pooling_type = "max";
    }
    PDPD_ASSERT((pooling_type == "max") || (pooling_type == "avg"),
                "pool2d: not supported pooling type !");
    PDPD_ASSERT(kernel_shape.size() == 1 || kernel_shape.size() == 2,
                "pool2d: ksize must be 1 or 2!");
    PartialShape input_shape = data.get_partial_shape();
    int32_t input_rank = input_shape.rank().get_length();
    PDPD_ASSERT(input_rank >= 2, "input tensor rank must be greater than 2");
    auto auto_pad = ngraph::op::PadType::EXPLICIT;
    ngraph::Shape pad_begin, pad_end;
    get_paddings(node, pad_begin, pad_end, auto_pad);
    if (global_pooling ||
        (adaptive && std::any_of(kernel_shape.begin(),
                                 kernel_shape.end(),
                                 [](int32_t i) { return i == 1; })))
    {
        // Global (or 1x1-adaptive) pooling: reduce over the two trailing
        // spatial axes, keeping dims so the output stays rank-equal.
        auto axes = ngraph::opset6::Constant::create(
            ngraph::element::i64, {2}, {input_rank - 2, input_rank - 1});
        if (pooling_type == "max")
        {
            return node.default_single_output_mapping(
                {std::make_shared<ngraph::opset6::ReduceMax>(data, axes, true)},
                {"Out"});
        }
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset6::ReduceMean>(data, axes, true)},
            {"Out"});
    }
    else if (adaptive)
    {
        // Index the trailing spatial dims via input_rank so the check agrees
        // with the rank assert above and never reads past the shape.
        PDPD_ASSERT(input_shape[input_rank - 2].is_static() &&
                        input_shape[input_rank - 1].is_static(),
                    "pool2d: spatial dim must be static when using adaptive pool");
        auto pool_size = std::vector<int64_t>(2, 0);
        if (kernel_shape.size() == 1)
        {
            // Not tested: implemented according to spec, but can't generate real
            // model to test
            pool_size[0] = pool_size[1] = kernel_shape[0];
        }
        else
        {
            pool_size[0] = kernel_shape[0];
            pool_size[1] = kernel_shape[1];
        }
        const Output<ngraph::Node> output_shape = ngraph::opset6::Constant::create(
            ngraph::element::i64, {pool_size.size()}, pool_size);
        if (pooling_type == "max")
        {
            // AdaptiveMaxPool yields two outputs: pooled values and the
            // argmax indices, mapped to PDPD's "Out" and "Mask".
            auto pool_outputs = std::make_shared<ngraph::opset8::AdaptiveMaxPool>(
                                    data, output_shape, ngraph::element::i32)
                                    ->outputs();
            NamedOutputs outputs;
            outputs["Out"] = {pool_outputs[0]};
            outputs["Mask"] = {pool_outputs[1]};
            return outputs;
        }
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset8::AdaptiveAvgPool>(data,
                                                               output_shape)},
            {"Out"});
    }
    else
    {
        auto strides = node.get_attribute<std::vector<int32_t>>("strides");
        uint64_t kernel_h, kernel_w;
        if (kernel_shape.size() == 1)
        {
            // Not tested: implemented according to spec, but can't generate real
            // model to test
            kernel_h = kernel_w = kernel_shape[0];
        }
        else
        {
            kernel_h = kernel_shape[0];
            kernel_w = kernel_shape[1];
        }
        PDPD_ASSERT(kernel_h > 0 && kernel_w > 0,
                    "pool2d kernel shape must be greater than 0");
        // Clip an oversized kernel to the padded input extent. Only possible
        // when the spatial dims are static; indices follow input_rank for
        // consistency with the reads below.
        if (input_shape[input_rank - 2].is_static() &&
            input_shape[input_rank - 1].is_static())
        {
            uint64_t input_h = input_shape[input_rank - 2].get_length();
            uint64_t input_w = input_shape[input_rank - 1].get_length();
            if ((input_h > 0) && (input_h + pad_begin[0] + pad_end[0] < kernel_h))
            {
                kernel_h = input_h + pad_begin[0] + pad_end[0];
            }
            if ((input_w > 0) && (input_w + pad_begin[1] + pad_end[1] < kernel_w))
            {
                kernel_w = input_w + pad_begin[1] + pad_end[1];
            }
        }
        if (pooling_type == "max")
        {
            return node.default_single_output_mapping(
                {std::make_shared<ngraph::opset6::MaxPool>(
                    data,
                    ngraph::Strides(strides.begin(), strides.end()),
                    pad_begin,
                    pad_end,
                    ngraph::Shape{kernel_h, kernel_w},
                    rounding_type,
                    auto_pad)},
                {"Out"});
        }
        // PDPD 'exclusive' maps to AvgPool's exclude_pad flag.
        bool exclude_pad = node.get_attribute<bool>("exclusive", false);
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset6::AvgPool>(
                data,
                ngraph::Strides(strides.begin(), strides.end()),
                pad_begin,
                pad_end,
                ngraph::Shape{kernel_h, kernel_w},
                exclude_pad,
                rounding_type,
                auto_pad)},
            {"Out"});
    }
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
#include <paddlepaddle_frontend/utility.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "pow": raises X element-wise to an exponent taken either
// from the optional "FactorTensor" input or from the "factor" attribute.
NamedOutputs pow(const NodeContext& node)
{
    auto base = node.get_ng_input("X");
    const auto base_type = base.get_element_type();
    Output<Node> exponent;
    if (!node.has_ng_input("FactorTensor"))
    {
        // Scalar exponent comes from the attribute; materialize it as a
        // one-element constant of X's element type.
        exponent = ngraph::opset6::Constant::create(
            base_type, Shape{1}, {node.get_attribute<float>("factor")});
    }
    else
    {
        exponent = node.get_ng_input("FactorTensor");
        // Align the tensor exponent's element type with X before Power.
        if (exponent.get_element_type() != base_type)
            exponent = std::make_shared<opset6::Convert>(exponent, base_type);
    }
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::Power>(base, exponent)}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,37 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "range" to ngraph Range.
// PDPD supplies Start/End/Step as 1-D tensors; ngraph Range requires
// scalars, so each input is squeezed along axis 0 first.
NamedOutputs range(const NodeContext& node)
{
    const auto squeeze_axis =
        ngraph::opset6::Constant::create(element::i64, Shape{}, {0});
    auto as_scalar = [&squeeze_axis](const auto& tensor) {
        return std::make_shared<ngraph::opset6::Squeeze>(tensor, squeeze_axis);
    };
    auto out_type = node.get_out_port_type("Out");
    auto range_node = std::make_shared<ngraph::opset6::Range>(
        as_scalar(node.get_ng_input("Start")),
        as_scalar(node.get_ng_input("End")),
        as_scalar(node.get_ng_input("Step")),
        out_type);
    return node.default_single_output_mapping({range_node}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
#include <paddlepaddle_frontend/utility.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "relu6": clamps activations to [0, threshold].
// The threshold attribute defaults to 6.0 when not present.
NamedOutputs relu6(const NodeContext& node)
{
    const auto input = node.get_ng_input("X");
    const auto upper = node.get_attribute<float>("threshold", 6.0f);
    auto clamped = std::make_shared<ngraph::opset6::Clamp>(input, 0.0, upper);
    return node.default_single_output_mapping({clamped}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,56 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
#include <paddlepaddle_frontend/utility.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "reshape2" to ngraph Reshape (special_zero=true, matching
// PDPD's "0 keeps the original dim" semantics).
// The target shape comes from, in priority order: the "ShapeTensor" input
// list, the "Shape" input, or the static "shape" attribute.
NamedOutputs reshape2(const NodeContext& node)
{
    const auto input = node.get_ng_input("X");
    const bool has_shape_tensor = node.has_ng_input("ShapeTensor");
    if (node.has_ng_input("Shape") || has_shape_tensor)
    {
        // Dynamic shape: cast every shape fragment to i64 and concatenate
        // them into a single 1-D target-shape tensor.
        const std::string port = has_shape_tensor ? "ShapeTensor" : "Shape";
        auto fragments = node.get_ng_inputs(port);
        ngraph::NodeVector dims;
        for (auto& fragment : fragments)
        {
            dims.push_back(
                std::make_shared<ngraph::opset6::Convert>(fragment, element::i64));
        }
        auto target = std::make_shared<ngraph::opset6::Concat>(dims, 0);
        return node.default_single_output_mapping(
            {std::make_shared<ngraph::opset6::Reshape>(input, target, true)},
            {"Out"});
    }
    // Static shape from the attribute.
    auto shape_attr = node.get_attribute<std::vector<int32_t>>("shape");
    auto target = ngraph::opset6::Constant::create(
        ngraph::element::i32, {shape_attr.size()}, shape_attr);
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::Reshape>(input, target, true)},
        {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "shape": emits the runtime shape of "Input" as an i32
// tensor (PDPD's shape op returns int32).
NamedOutputs shape(const NodeContext& node)
{
    const auto input = node.get_ng_input("Input");
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::ShapeOf>(input, element::i32)},
        {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "sigmoid": a direct 1:1 mapping to ngraph Sigmoid.
NamedOutputs sigmoid(const NodeContext& node)
{
    auto result =
        std::make_shared<ngraph::opset6::Sigmoid>(node.get_ng_input("X"));
    return node.default_single_output_mapping({result}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,82 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <limits.h>
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "slice" to ngraph StridedSlice.
// Starts/ends may each come from a single tensor, a tensor list (concatenated),
// or an attribute. A full-range [0, INT_MAX) window is built for every dim,
// then the requested starts/ends are scattered into it at the given axes, so
// unmentioned dims remain unsliced.
NamedOutputs slice(const NodeContext& node)
{
auto data = node.get_ng_input("Input");
auto axes = node.get_attribute<std::vector<int32_t>>("axes");
Output<Node> start_idx_node, end_idx_node;
if (node.has_ng_input("StartsTensor"))
{
start_idx_node = node.get_ng_input("StartsTensor");
}
else if (node.has_ng_input("StartsTensorList"))
{
auto inputs = node.get_ng_inputs("StartsTensorList");
start_idx_node = std::make_shared<ngraph::opset6::Concat>(inputs, 0);
}
else
{
auto starts = node.get_attribute<std::vector<int32_t>>("starts");
start_idx_node =
opset6::Constant::create(element::i32, {starts.size()}, starts);
}
if (node.has_ng_input("EndsTensor"))
{
end_idx_node = node.get_ng_input("EndsTensor");
}
else if (node.has_ng_input("EndsTensorList"))
{
auto inputs = node.get_ng_inputs("EndsTensorList");
end_idx_node = std::make_shared<ngraph::opset6::Concat>(inputs, 0);
}
else
{
auto ends = node.get_attribute<std::vector<int32_t>>("ends");
end_idx_node = opset6::Constant::create(element::i32, {ends.size()}, ends);
}
// the shape of input, such as [1, 1, 3, 3]
auto shape_node = std::make_shared<opset6::ShapeOf>(data, element::Type_t::i32);
// the input dim, such as [4]
auto shape_shape_node =
std::make_shared<opset6::ShapeOf>(shape_node, element::i32);
auto const_0_node = opset6::Constant::create(element::i32, {}, {0});
auto const_max_node = opset6::Constant::create(element::i32, {}, {INT_MAX});
// array [0:max)
// Broadcast 0 / INT_MAX to one entry per input dim: the default window.
auto start_node =
std::make_shared<opset6::Broadcast>(const_0_node, shape_shape_node);
auto end_node =
std::make_shared<opset6::Broadcast>(const_max_node, shape_shape_node);
// ScatterNDUpdate indices must be {n, 1}-shaped; overwrite the default
// window only at the sliced axes.
auto axes_node = opset6::Constant::create(element::i32, {axes.size(), 1}, axes);
auto fixed_start_node = std::make_shared<opset6::ScatterNDUpdate>(
start_node, axes_node, start_idx_node);
auto fixed_end_node = std::make_shared<opset6::ScatterNDUpdate>(
end_node, axes_node, end_idx_node);
// Empty begin/end masks: every dim uses the computed bounds.
return node.default_single_output_mapping(
{std::make_shared<ngraph::opset6::StridedSlice>(data,
fixed_start_node,
fixed_end_node,
std::vector<int64_t>{},
std::vector<int64_t>{})},
{"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "softmax" to ngraph Softmax.
// ngraph requires a non-negative axis, so a negative PDPD axis is
// normalized against the input rank (which must then be static).
NamedOutputs softmax(const NodeContext& node)
{
    auto input = node.get_ng_input("X");
    auto axis = node.get_attribute<int32_t>("axis");
    if (axis < 0)
    {
        PDPD_OP_VALIDATION_CHECK(node,
                                 input.get_partial_shape().rank().is_static(),
                                 "Softmax rank must be static");
        axis += input.get_partial_shape().rank().get_length();
    }
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::Softmax>(input, axis)}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "squeeze" to ngraph Squeeze.
// The 'axes' attribute is optional; when absent or empty, every size-1
// dimension is squeezed (axis-less Squeeze overload).
NamedOutputs squeeze(const NodeContext& node)
{
    auto input = node.get_ng_input("X");
    std::vector<int32_t> axes;
    if (node.has_attribute<std::vector<int32_t>>("axes"))
    {
        axes = node.get_attribute<std::vector<int32_t>>("axes");
    }
    std::shared_ptr<Node> result;
    if (axes.empty())
    {
        result = std::make_shared<ngraph::opset6::Squeeze>(input);
    }
    else
    {
        auto axes_const = ngraph::opset6::Constant::create(
            ngraph::element::i32, {axes.size()}, axes);
        result = std::make_shared<ngraph::opset6::Squeeze>(input, axes_const);
    }
    return node.default_single_output_mapping(result, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,41 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PDPD "unsqueeze" to ngraph Unsqueeze.
// Axes may come from a single "AxesTensor" input, an "AxesTensorList"
// (concatenated along axis 0), or the static "axes" attribute.
NamedOutputs unsqueeze(const NodeContext& node)
{
    auto input = node.get_ng_input("X");
    Output<Node> axes;
    if (node.has_ng_input("AxesTensor"))
    {
        axes = node.get_ng_input("AxesTensor");
    }
    else if (node.has_ng_input("AxesTensorList"))
    {
        axes = std::make_shared<ngraph::opset6::Concat>(
            node.get_ng_inputs("AxesTensorList"), 0);
    }
    else
    {
        const auto axes_attr = node.get_attribute<std::vector<int32_t>>("axes");
        axes = ngraph::opset6::Constant::create(
            ngraph::element::i32, {axes_attr.size()}, axes_attr);
    }
    return node.default_single_output_mapping(
        {std::make_shared<ngraph::opset6::Unsqueeze>(input, axes)}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,433 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <limits> // std::numeric_limits
#include <numeric>
#include <ngraph/opsets/opset6.hpp>
#include <node_context.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
using namespace opset6;
using namespace element;
// reference
// Paddle/python/paddle/fluid/tests/unittests/test_yolo_box_op.py
// Paddle/paddle/fluid/operators/detection/yolo_box_op.h
// Paddle2ONNX/paddle2onnx/op_mapper/detection/yolo_box.py - clip_bbox is not used
// by Paddle2ONNX.
NamedOutputs yolo_box(const NodeContext& node_context)
{
auto data = node_context.get_ng_input("X");
auto image_size = node_context.get_ng_input("ImgSize");
// get shape of X
auto input_shape = std::make_shared<ShapeOf>(data, i64);
auto indices_batchsize = Constant::create<int32_t>(i64, {1}, {0});
auto indices_height = Constant::create<int32_t>(i64, {1}, {2});
auto indices_width = Constant::create<int64_t>(i64, {1}, {3});
auto const_axis0 = Constant::create<int64_t>(i64, {1}, {0});
auto input_height =
std::make_shared<Gather>(input_shape, indices_height, const_axis0); // H
auto input_width =
std::make_shared<Gather>(input_shape, indices_width, const_axis0); // W
auto batch_size =
std::make_shared<Gather>(input_shape, indices_batchsize, const_axis0); // N
int32_t class_num = node_context.get_attribute<int32_t>("class_num");
auto const_class_num = Constant::create<int64_t>(i64, {1}, {class_num});
// PDPD anchors attribute is of type int32. Convert to float for computing
// convinient.
auto _anchors = node_context.get_attribute<std::vector<int32_t>>("anchors");
std::vector<float> anchors(_anchors.begin(), _anchors.end());
uint32_t num_anchors = anchors.size() / 2;
auto const_num_anchors = Constant::create<int64_t>(i64, {1}, {num_anchors});
auto default_scale = 1.0f;
auto scale_x_y = node_context.get_attribute<float>("scale_x_y", default_scale);
auto downsample_ratio = node_context.get_attribute<int32_t>("downsample_ratio");
auto const_downsample_ratio =
Constant::create<int64_t>(i64, {1}, {downsample_ratio});
auto scaled_input_height =
std::make_shared<Multiply>(input_height, const_downsample_ratio);
auto scaled_input_width =
std::make_shared<Multiply>(input_width, const_downsample_ratio);
// score_shape {batch_size, input_height * input_width * num_anchors, class_num}
auto node_mul_whc = std::make_shared<Multiply>(input_height, input_width);
node_mul_whc = std::make_shared<Multiply>(node_mul_whc, const_num_anchors);
auto score_shape = std::make_shared<Concat>(
NodeVector{batch_size, node_mul_whc, const_class_num}, 0);
auto conf_thresh = node_context.get_attribute<float>("conf_thresh");
auto const_conf_thresh = Constant::create<float>(f32, {1}, {conf_thresh});
auto clip_bbox = node_context.get_attribute<bool>("clip_bbox");
// main X
// node_x_shape {batch_size, num_anchors, 5 + class_num, input_height,
// input_width}
auto const_class_num_plus5 =
Constant::create<int64_t>(i64, {1}, {5 + class_num});
auto node_x_shape = std::make_shared<Concat>(NodeVector{batch_size,
const_num_anchors,
const_class_num_plus5,
input_height,
input_width},
0);
auto node_x_reshape = std::make_shared<Reshape>(data, node_x_shape, false);
auto node_input_order = Constant::create<int64_t>(i64, {5}, {0, 1, 3, 4, 2});
auto node_x_transpose =
std::make_shared<Transpose>(node_x_reshape, node_input_order);
// range x/y
// range_x: shape {1, input_width} containing 0...input_width
// range_y: shape {input_height, 1} containing 0...input_height
auto const_start = Constant::create<float>(f32, {}, {0.f});
auto const_step = Constant::create<float>(f32, {}, {1.f});
auto reduction_axes = Constant::create<int64_t>(i64, {1}, {0});
auto scaler_input_width =
std::make_shared<ReduceMin>(input_width, reduction_axes, false);
auto range_x =
std::make_shared<Range>(const_start, scaler_input_width, const_step, f32);
auto node_range_x = std::make_shared<Unsqueeze>(
range_x, Constant::create<int64_t>(i64, {1}, {0}));
auto scaler_input_height =
std::make_shared<ReduceMin>(input_height, reduction_axes, false);
auto range_y =
std::make_shared<Range>(const_start, scaler_input_height, const_step, f32);
auto node_range_y = std::make_shared<Unsqueeze>(
range_y, Constant::create<int64_t>(i64, {1}, {1}));
auto node_range_x_shape = std::make_shared<Concat>(
NodeVector{Constant::create<int64_t>(i64, {1}, {1}), input_width}, 0);
auto node_range_y_shape = std::make_shared<Concat>(
NodeVector{input_height, Constant::create<int64_t>(i64, {1}, {1})}, 0);
auto node_grid_x =
std::make_shared<Tile>(node_range_x, node_range_y_shape); // shape (H, W)
auto node_grid_y = std::make_shared<Tile>(node_range_y, node_range_x_shape);
// main X (part2)
auto node_split_axis = Constant::create<int64_t>(i64, {1}, {-1});
auto node_split_lengths =
Constant::create<int64_t>(i64, {6}, {1, 1, 1, 1, 1, class_num});
auto node_split_input = std::make_shared<VariadicSplit>(
node_x_transpose, node_split_axis, node_split_lengths);
auto node_box_x =
node_split_input->output(0); // shape (batch_size, num_anchors, H, W, 1)
auto node_box_y = node_split_input->output(1);
auto node_box_w = node_split_input->output(2);
auto node_box_h = node_split_input->output(3);
auto node_conf = node_split_input->output(4);
auto node_prob = node_split_input->output(5);
// x/y
std::shared_ptr<ngraph::Node> node_box_x_sigmoid =
std::make_shared<Sigmoid>(node_box_x);
std::shared_ptr<ngraph::Node> node_box_y_sigmoid =
std::make_shared<Sigmoid>(node_box_y);
if (std::fabs(scale_x_y - default_scale) > 1e-6)
{ // float not-equal
float bias_x_y = -0.5 * (scale_x_y - 1.0);
auto scale_x_y_node = Constant::create<float>(f32, {1}, {scale_x_y});
auto bias_x_y_node = Constant::create<float>(f32, {1}, {bias_x_y});
node_box_x_sigmoid =
std::make_shared<Multiply>(node_box_x_sigmoid, scale_x_y_node);
node_box_x_sigmoid =
std::make_shared<Add>(node_box_x_sigmoid, bias_x_y_node);
node_box_y_sigmoid =
std::make_shared<Multiply>(node_box_y_sigmoid, scale_x_y_node);
node_box_y_sigmoid =
std::make_shared<Add>(node_box_y_sigmoid, bias_x_y_node);
}
auto squeeze_box_x = Constant::create<int64_t>(i64, {1}, {4});
auto node_box_x_squeeze =
std::make_shared<Squeeze>(node_box_x_sigmoid, squeeze_box_x);
auto squeeze_box_y = Constant::create<int64_t>(i64, {1}, {4});
auto node_box_y_squeeze =
std::make_shared<Squeeze>(node_box_y_sigmoid, squeeze_box_y);
auto node_box_x_add_grid =
std::make_shared<Add>(node_grid_x, node_box_x_squeeze);
auto node_box_y_add_grid =
std::make_shared<Add>(node_grid_y, node_box_y_squeeze);
auto node_input_h = std::make_shared<Convert>(input_height, element::f32);
auto node_input_w = std::make_shared<Convert>(input_width, element::f32);
auto node_box_x_encode =
std::make_shared<Divide>(node_box_x_add_grid, node_input_w);
auto node_box_y_encode =
std::make_shared<Divide>(node_box_y_add_grid, node_input_h);
// w/h
auto node_anchor_tensor =
Constant::create<float>(f32, {num_anchors, 2}, anchors);
auto split_axis = Constant::create<int64_t>(i64, {}, {1});
auto node_anchor_split =
std::make_shared<Split>(node_anchor_tensor, split_axis, 2);
auto node_anchor_w_origin = node_anchor_split->output(0);
auto node_anchor_h_origin = node_anchor_split->output(1);
auto float_input_height =
std::make_shared<Convert>(scaled_input_height, element::f32);
auto node_anchor_h =
std::make_shared<Divide>(node_anchor_h_origin, float_input_height);
auto float_input_width =
std::make_shared<Convert>(scaled_input_width, element::f32);
auto node_anchor_w =
std::make_shared<Divide>(node_anchor_w_origin, float_input_width);
auto node_new_anchor_shape =
Constant::create<int64_t>(i64, {4}, {1, num_anchors, 1, 1});
auto node_anchor_w_reshape =
std::make_shared<Reshape>(node_anchor_w, node_new_anchor_shape, false);
auto node_anchor_h_reshape =
std::make_shared<Reshape>(node_anchor_h, node_new_anchor_shape, false);
auto squeeze_box_wh = Constant::create<int64_t>(i64, {1}, {4});
auto node_box_w_squeeze = std::make_shared<Squeeze>(node_box_w, squeeze_box_wh);
auto node_box_h_squeeze = std::make_shared<Squeeze>(node_box_h, squeeze_box_wh);
auto node_box_w_exp = std::make_shared<Exp>(node_box_w_squeeze);
auto node_box_h_exp = std::make_shared<Exp>(node_box_h_squeeze);
auto node_box_w_encode =
std::make_shared<Multiply>(node_box_w_exp, node_anchor_w_reshape);
auto node_box_h_encode =
std::make_shared<Multiply>(node_box_h_exp, node_anchor_h_reshape);
// confidence
auto node_conf_sigmoid = std::make_shared<Sigmoid>(node_conf);
auto node_concat = std::make_shared<Concat>(
NodeVector{Constant::create<int64_t>(i64, {1}, {1}),
const_num_anchors,
input_height,
input_width,
Constant::create<int64_t>(i64, {1}, {1})},
0);
auto node_conf_thresh = std::make_shared<Broadcast>(
const_conf_thresh,
node_concat); // {1, num_anchors, input_height, input_width, 1}
auto node_conf_sub =
std::make_shared<Subtract>(node_conf_sigmoid, node_conf_thresh);
auto node_conf_clip = std::make_shared<Clamp>(
node_conf_sub, 0.0f, std::numeric_limits<float>::max());
auto node_zeros = Constant::create<float>(f32, {1}, {0});
auto node_conf_clip_bool =
std::make_shared<Greater>(node_conf_clip, node_zeros);
auto node_conf_clip_cast = std::make_shared<Convert>(node_conf_clip_bool, f32);
auto node_conf_set_zero =
std::make_shared<Multiply>(node_conf_sigmoid, node_conf_clip_cast);
/* probability */
auto node_prob_sigmoid = std::make_shared<Sigmoid>(node_prob);
auto node_new_shape = std::make_shared<Concat>(
NodeVector{batch_size,
const_num_anchors,
input_height,
input_width,
Constant::create<int64_t>(i64, {1}, {1})},
0);
auto node_conf_new_shape = std::make_shared<Reshape>(
node_conf_set_zero,
node_new_shape,
false); // {batch_size, int(num_anchors), input_height, input_width, 1}
// broadcast confidence * probability of each category
auto node_score =
std::make_shared<Multiply>(node_prob_sigmoid, node_conf_new_shape);
// for bbox which has object (greater than threshold)
auto node_conf_bool =
std::make_shared<Greater>(node_conf_new_shape, node_zeros);
auto node_box_x_new_shape =
std::make_shared<Reshape>(node_box_x_encode, node_new_shape, false);
auto node_box_y_new_shape =
std::make_shared<Reshape>(node_box_y_encode, node_new_shape, false);
auto node_box_w_new_shape =
std::make_shared<Reshape>(node_box_w_encode, node_new_shape, false);
auto node_box_h_new_shape =
std::make_shared<Reshape>(node_box_h_encode, node_new_shape, false);
auto node_pred_box =
std::make_shared<Concat>(OutputVector{node_box_x_new_shape,
node_box_y_new_shape,
node_box_w_new_shape,
node_box_h_new_shape},
4);
auto node_conf_cast = std::make_shared<Convert>(node_conf_bool, f32);
auto node_pred_box_mul_conf =
std::make_shared<Multiply>(node_pred_box, node_conf_cast);
auto node_box_shape = std::make_shared<Concat>(
NodeVector{
batch_size, node_mul_whc, Constant::create<int64_t>(i64, {1}, {4})},
0);
auto node_pred_box_new_shape = std::make_shared<Reshape>(
node_pred_box_mul_conf,
node_box_shape,
false); // {batch_size, int(num_anchors) * input_height * input_width, 4}
auto pred_box_split_axis = Constant::create<int32_t>(i64, {}, {2});
auto node_pred_box_split =
std::make_shared<Split>(node_pred_box_new_shape, pred_box_split_axis, 4);
auto node_pred_box_x = node_pred_box_split->output(0);
auto node_pred_box_y = node_pred_box_split->output(1);
auto node_pred_box_w = node_pred_box_split->output(2);
auto node_pred_box_h = node_pred_box_split->output(3);
/* x,y,w,h -> x1,y1,x2,y2 */
auto node_number_two = Constant::create<float>(f32, {1}, {2.0f});
auto node_half_w = std::make_shared<Divide>(node_pred_box_w, node_number_two);
auto node_half_h = std::make_shared<Divide>(node_pred_box_h, node_number_two);
auto node_pred_box_x1 =
std::make_shared<Subtract>(node_pred_box_x, node_half_w);
auto node_pred_box_y1 =
std::make_shared<Subtract>(node_pred_box_y, node_half_h);
auto node_pred_box_x2 = std::make_shared<Add>(node_pred_box_x, node_half_w);
auto node_pred_box_y2 = std::make_shared<Add>(node_pred_box_y, node_half_h);
/* map normalized coords to original image */
auto indices_height_imgsize = Constant::create<int32_t>(i64, {1}, {0});
auto indices_width_imgsize = Constant::create<int64_t>(i64, {1}, {1});
auto const_axis1 = Constant::create<int64_t>(i64, {1}, {1});
auto node_img_height = std::make_shared<Gather>(
image_size, indices_height_imgsize, const_axis1); // shape_image_size[0]
auto node_img_width = std::make_shared<Gather>(
image_size, indices_width_imgsize, const_axis1); // shape_image_size[1]
auto node_img_width_cast = std::make_shared<Convert>(node_img_width, f32);
auto node_img_height_cast = std::make_shared<Convert>(node_img_height, f32);
auto squeeze_axes2 = Constant::create<int64_t>(i64, {1}, {2});
auto node_pred_box_x1_reshape = std::make_shared<Squeeze>(
node_pred_box_x1,
squeeze_axes2); // shape (N,C,1) -> (N,C) for upcomping multiply.
auto node_pred_box_y1_reshape =
std::make_shared<Squeeze>(node_pred_box_y1, squeeze_axes2);
auto node_pred_box_x2_reshape =
std::make_shared<Squeeze>(node_pred_box_x2, squeeze_axes2);
auto node_pred_box_y2_reshape =
std::make_shared<Squeeze>(node_pred_box_y2, squeeze_axes2);
auto node_pred_box_x1_squeeze =
std::make_shared<Multiply>(node_pred_box_x1_reshape, node_img_width_cast);
auto node_pred_box_y1_squeeze =
std::make_shared<Multiply>(node_pred_box_y1_reshape, node_img_height_cast);
auto node_pred_box_x2_squeeze =
std::make_shared<Multiply>(node_pred_box_x2_reshape, node_img_width_cast);
auto node_pred_box_y2_squeeze =
std::make_shared<Multiply>(node_pred_box_y2_reshape, node_img_height_cast);
std::shared_ptr<ngraph::Node> node_pred_box_result;
if (clip_bbox)
{
auto node_number_one = Constant::create<float>(f32, {1}, {1.0});
auto node_new_img_height =
std::make_shared<Subtract>(node_img_height_cast, node_number_one);
auto node_new_img_width =
std::make_shared<Subtract>(node_img_width_cast, node_number_one);
auto node_pred_box_x2_sub_w = std::make_shared<Subtract>(
node_pred_box_x2_squeeze, node_new_img_width); // x2 - (w-1)
auto node_pred_box_y2_sub_h = std::make_shared<Subtract>(
node_pred_box_y2_squeeze, node_new_img_height); // y2 - (h-1)
auto max_const = std::numeric_limits<float>::max();
auto node_pred_box_x1_clip =
std::make_shared<Clamp>(node_pred_box_x1_squeeze, 0.0f, max_const);
auto node_pred_box_y1_clip =
std::make_shared<Clamp>(node_pred_box_y1_squeeze, 0.0f, max_const);
auto node_pred_box_x2_clip =
std::make_shared<Clamp>(node_pred_box_x2_sub_w, 0.0f, max_const);
auto node_pred_box_y2_clip =
std::make_shared<Clamp>(node_pred_box_y2_sub_h, 0.0f, max_const);
auto node_pred_box_x2_res = std::make_shared<Subtract>(
node_pred_box_x2_squeeze, node_pred_box_x2_clip);
auto node_pred_box_y2_res = std::make_shared<Subtract>(
node_pred_box_y2_squeeze, node_pred_box_y2_clip);
auto node_pred_box_x1_clip2 = std::make_shared<Unsqueeze>(
node_pred_box_x1_clip, squeeze_axes2); // reshape back to (N,C,1)
auto node_pred_box_y1_clip2 =
std::make_shared<Unsqueeze>(node_pred_box_y1_clip, squeeze_axes2);
auto node_pred_box_x2_res2 =
std::make_shared<Unsqueeze>(node_pred_box_x2_res, squeeze_axes2);
auto node_pred_box_y2_res2 =
std::make_shared<Unsqueeze>(node_pred_box_y2_res, squeeze_axes2);
node_pred_box_result =
std::make_shared<Concat>(OutputVector{node_pred_box_x1_clip2,
node_pred_box_y1_clip2,
node_pred_box_x2_res2,
node_pred_box_y2_res2},
-1); // outputs=node.output('Boxes')
}
else
{
auto node_pred_box_x1_decode = std::make_shared<Unsqueeze>(
node_pred_box_x1_squeeze, squeeze_axes2); // reshape back to (N,C,1)
auto node_pred_box_y1_decode =
std::make_shared<Unsqueeze>(node_pred_box_y1_squeeze, squeeze_axes2);
auto node_pred_box_x2_decode =
std::make_shared<Unsqueeze>(node_pred_box_x2_squeeze, squeeze_axes2);
auto node_pred_box_y2_decode =
std::make_shared<Unsqueeze>(node_pred_box_y2_squeeze, squeeze_axes2);
node_pred_box_result =
std::make_shared<Concat>(OutputVector{node_pred_box_x1_decode,
node_pred_box_y1_decode,
node_pred_box_x2_decode,
node_pred_box_y2_decode},
-1); // outputs=node.output('Boxes')
}
//
auto node_score_new_shape = std::make_shared<Reshape>(
node_score, score_shape, false); // outputs=node.output('Scores')
NamedOutputs outputs;
outputs["Boxes"] = {node_pred_box_result};
outputs["Scores"] = {node_score_new_shape};
return outputs;
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -16,7 +16,6 @@ namespace ngraph
OP_CONVERTER(assign_value);
OP_CONVERTER(batch_norm);
OP_CONVERTER(bilinear_interp_v2);
OP_CONVERTER(matmul);
OP_CONVERTER(cast);
OP_CONVERTER(clip);
OP_CONVERTER(concat);
@ -43,14 +42,28 @@ namespace ngraph
OP_CONVERTER(log);
OP_CONVERTER(logical_not);
OP_CONVERTER(matmul);
OP_CONVERTER(mul);
OP_CONVERTER(matrix_nms);
OP_CONVERTER(multiclass_nms);
OP_CONVERTER(nearest_interp_v2);
OP_CONVERTER(rnn);
OP_CONVERTER(pad3d);
OP_CONVERTER(pow);
OP_CONVERTER(pool2d);
OP_CONVERTER(range);
OP_CONVERTER(relu);
OP_CONVERTER(relu6);
OP_CONVERTER(reshape2);
OP_CONVERTER(rnn);
OP_CONVERTER(scale);
OP_CONVERTER(shape);
OP_CONVERTER(slice);
OP_CONVERTER(softmax);
OP_CONVERTER(sigmoid);
OP_CONVERTER(split);
OP_CONVERTER(squeeze);
OP_CONVERTER(transpose2);
OP_CONVERTER(unsqueeze);
OP_CONVERTER(yolo_box);
} // namespace op
} // namespace pdpd
} // namespace frontend
@ -64,54 +77,68 @@ namespace ngraph
{
std::map<std::string, CreatorFunction> get_supported_ops()
{
return {
{"arg_max", op::argmax},
{"assign_value", op::assign_value},
{"batch_norm", op::batch_norm},
{"bilinear_interp_v2", op::bilinear_interp_v2},
{"bilinear_interp", op::bilinear_interp_v2},
{"bmm", op::matmul},
{"cast", op::cast},
{"clip", op::clip},
{"concat", op::concat},
{"conv2d", op::conv2d},
{"conv2d_transpose", op::conv2d_transpose},
{"deformable_conv", op::deformable_conv},
{"deformable_conv_v1", op::deformable_conv},
{"depthwise_conv2d", op::conv2d},
{"depthwise_conv2d_transpose", op::conv2d_transpose},
{"dropout", op::dropout},
{"elementwise_add", op::elementwise_add},
{"elementwise_div", op::elementwise_div},
{"elementwise_max", op::elementwise_max},
{"elementwise_min", op::elementwise_min},
{"elementwise_mul", op::elementwise_mul},
{"elementwise_pow", op::elementwise_pow},
{"elementwise_sub", op::elementwise_sub},
{"equal", op::elementwise_equal},
{"expand_v2", op::expand_v2},
{"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
{"fill_constant", op::fill_constant},
{"flatten_contiguous_range", op::flatten_contiguous_range},
{"greater_equal", op::elementwise_greater_equal},
{"hard_sigmoid", op::hard_sigmoid},
{"hard_swish", op::hard_swish},
{"leaky_relu", op::leaky_relu},
{"log", op::log},
{"logical_not", op::logical_not},
{"matmul", op::matmul},
{"matrix_nms", op::matrix_nms},
{"multiclass_nms3", op::multiclass_nms},
{"nearest_interp_v2", op::nearest_interp_v2},
{"nearest_interp", op::nearest_interp_v2},
{"rnn", op::rnn},
{"relu", op::relu},
{"scale", op::scale},
{"split", op::split},
{"transpose2", op::transpose2},
};
return {{"arg_max", op::argmax},
{"assign_value", op::assign_value},
{"batch_norm", op::batch_norm},
{"bilinear_interp_v2", op::bilinear_interp_v2},
{"bilinear_interp", op::bilinear_interp_v2},
{"bmm", op::matmul},
{"cast", op::cast},
{"clip", op::clip},
{"concat", op::concat},
{"conv2d", op::conv2d},
{"conv2d_transpose", op::conv2d_transpose},
{"deformable_conv", op::deformable_conv},
{"deformable_conv_v1", op::deformable_conv},
{"depthwise_conv2d", op::conv2d},
{"depthwise_conv2d_transpose", op::conv2d_transpose},
{"dropout", op::dropout},
{"elementwise_add", op::elementwise_add},
{"elementwise_div", op::elementwise_div},
{"elementwise_max", op::elementwise_max},
{"elementwise_min", op::elementwise_min},
{"elementwise_mul", op::elementwise_mul},
{"elementwise_pow", op::elementwise_pow},
{"elementwise_sub", op::elementwise_sub},
{"equal", op::elementwise_equal},
{"expand_v2", op::expand_v2},
{"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
{"fill_constant", op::fill_constant},
{"flatten_contiguous_range", op::flatten_contiguous_range},
{"greater_equal", op::elementwise_greater_equal},
{"hard_sigmoid", op::hard_sigmoid},
{"hard_swish", op::hard_swish},
{"leaky_relu", op::leaky_relu},
{"log", op::log},
{"logical_not", op::logical_not},
{"matmul", op::matmul},
{"max_pool2d_with_index", op::pool2d},
{"mul", op::mul},
{"matrix_nms", op::matrix_nms},
{"multiclass_nms3", op::multiclass_nms},
{"nearest_interp_v2", op::nearest_interp_v2},
{"nearest_interp", op::nearest_interp_v2},
{"pad3d", op::pad3d},
{"pow", op::pow},
{"pool2d", op::pool2d},
{"range", op::range},
{"relu", op::relu},
{"relu6", op::relu6},
{"reshape2", op::reshape2},
{"rnn", op::rnn},
{"scale", op::scale},
{"shape", op::shape},
{"slice", op::slice},
{"softmax", op::softmax},
{"sigmoid", op::sigmoid},
{"split", op::split},
{"squeeze2", op::squeeze},
{"sync_batch_norm", op::batch_norm},
{"transpose2", op::transpose2},
{"unsqueeze2", op::unsqueeze},
{"yolo_box", op::yolo_box}};
};
} // namespace pdpd
} // namespace frontend
} // namespace ngraph
} // namespace ngraph