【PaddlePaddle Hackathon 4】add paddle one_hot_v2 op (#15859)
* update op linspace
* rewrite function name
* add one_hot op mapping
* change status
* add depth_tensor
* adjust op test
* Update generate_one_hot_v2.py
* adjust cpp
* adjust cpp
* remove default value
* Update Supported_Frameworks_Layers.md
* support N-dims
* remove restriction

Co-authored-by: Yu Xu <yu.xu@intel.com>
Co-authored-by: cecilia peng <cecilia.peng@intel.com>
Co-authored-by: Xiuchuan Zhai <xiuchuan.zhai@intel.com>
This commit is contained in: parent 5b6bab2636, commit 0963c23b29
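For context before the diff itself, here is a minimal numpy sketch (not part of this PR) of what paddle's one_hot_v2 computes, and therefore what the converter below maps onto the OpenVINO OneHot op. The index values and depth are borrowed from the generator's rank-1 test case; on_value/off_value mirror the f32 constants the converter emits.

import numpy as np

# Rank-1 indices and depth taken from the "one_hot_v2_2" case in the generator below.
indices = np.array([4, 1, 3, 3], dtype=np.int64)
depth = 5                       # num_classes
on_value, off_value = 1.0, 0.0  # same f32 on/off values the converter hard-codes

# one_hot_v2 appends a new one-hot axis of length `depth` after the input axes.
one_hot = np.full(indices.shape + (depth,), off_value, dtype=np.float32)
one_hot[np.arange(indices.size), indices] = on_value  # set the "on" positions (rank-1 case)
print(one_hot.shape)  # (4, 5)
# [[0. 0. 0. 0. 1.]
#  [0. 1. 0. 0. 0.]
#  [0. 0. 0. 1. 0.]
#  [0. 0. 0. 1. 0.]]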
Supported_Frameworks_Layers.md
@@ -771,6 +771,7 @@ paddlepaddle >= 2.1
  multiclass_nms   Only supports IE CPU plugin with "number of selected boxes" static shape (e.g.: ``min(min(num_boxes, nms_top_k) * num_classes_output, keep_top_k)``).
  nearest_interp   ``NCW``, ``NWC``, ``NHWC``, ``NCDHW``, ``NDHWC`` data_layout are not supported.
  not_equal
+ one_hot_v2
  p_norm
  pad3d            ``Circular`` mode is not supported.
  pool2d           ``NHWC`` data_layout is not supported.
src/frontends/paddle/src/op/one_hot_v2.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs one_hot_v2(const NodeContext& node) {
    auto data = node.get_input("X");
    Output<Node> depth;
    if (node.has_input("depth_tensor")) {
        auto depth_value = node.get_input("depth_tensor");
        depth = std::make_shared<default_opset::Squeeze>(depth_value);
    } else {
        const auto depth_value = node.get_attribute<int>("depth");
        depth = default_opset::Constant::create(element::i32, Shape{}, {depth_value});
    }
    auto on_value = default_opset::Constant::create(element::f32, Shape{}, {1});
    auto off_value = default_opset::Constant::create(element::f32, Shape{}, {0});
    const auto indices_axis = -1;
    auto result = std::make_shared<default_opset::OneHot>(data, depth, on_value, off_value, indices_axis);
    return node.default_single_output_mapping({result}, {"Out"});
}
}  // namespace op
}  // namespace paddle
}  // namespace frontend
}  // namespace ov
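A note on the depth handling above (a reading of the code, not stated in the PR): PaddlePaddle can carry the class count either as a `depth_tensor` input, fed as a one-element tensor in the generator below, which the converter squeezes down to the scalar that OneHot expects, or as an integer `depth` attribute, which is materialized as an i32 scalar constant. `on_value`/`off_value` are fixed f32 1/0, and the one-hot axis is appended last (`indices_axis = -1`), so indices of any rank pass through unchanged, which is what the "support N-dims" item in the commit list refers to.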
Converter declaration and registration in the paddle op table:

@@ -73,6 +73,7 @@ OP_CONVERTER(matrix_nms);
 OP_CONVERTER(meshgrid);
 OP_CONVERTER(multiclass_nms);
 OP_CONVERTER(nearest_interp_v2);
+OP_CONVERTER(one_hot_v2);
 OP_CONVERTER(p_norm);
 OP_CONVERTER(pad3d);
 OP_CONVERTER(pow);

@@ -195,6 +196,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
 {"nearest_interp_v2", op::nearest_interp_v2},
 {"nearest_interp", op::nearest_interp_v2},
 {"not_equal", op::elementwise_not_equal},
+{"one_hot_v2", op::one_hot_v2},
 {"p_norm", op::p_norm},
 {"pad3d", op::pad3d},
 {"pow", op::pow},
Test model list:

@@ -377,6 +377,9 @@ static const std::vector<std::string> models{
 std::string("not_equal_float32"),
 std::string("not_equal_int32"),
 std::string("not_equal_int64"),
+std::string("one_hot_v2_1"),
+std::string("one_hot_v2_2"),
+std::string("one_hot_v2_3"),
 std::string("p_norm1"),
 std::string("p_norm2"),
 std::string("p_norm3"),
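The three new model names match the cases produced by generate_one_hot_v2.py below: a depth given as a plain int attribute on int32 indices, and a depth_tensor input on rank-1 and rank-2 int64 indices.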
generate_one_hot_v2.py (new file, 47 lines)
@@ -0,0 +1,47 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#
# one_hot_v2 paddle model generator
#
import paddle
import numpy as np
from save_model import saveModel
import sys


def one_hot_v2(name: str, x, num_classes, is_tensor):
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        x_node = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
        depth_node = paddle.static.data(name="depth_tensor", shape=num_classes.shape, dtype=num_classes.dtype) if is_tensor else num_classes
        out = paddle.nn.functional.one_hot(x_node, num_classes=depth_node)
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        feed_list = {"x": x, "depth_tensor": num_classes} if is_tensor else {"x": x}
        outs = exe.run(feed=feed_list, fetch_list=[out])
        feedkey_list = ["x", "depth_tensor"] if is_tensor else ['x']
        input_list = [x, num_classes] if is_tensor else [x]
        saveModel(name, exe, feedkeys=feedkey_list, fetchlist=[out], inputs=input_list, outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    # int 32
    data = np.array([1]).astype("int32")
    num_classes = 4
    one_hot_v2("one_hot_v2_1", data, num_classes, is_tensor=False)
    # rank 1 int64
    data = np.array([4, 1, 3, 3]).astype("int64")
    num_classes = np.array([5]).astype("int32")
    one_hot_v2("one_hot_v2_2", data, num_classes, is_tensor=True)
    # rank 2 int64
    data = np.array([[4, 1, 3, 3], [1, 1, 3, 0]]).astype("int64")
    num_classes = np.array([5]).astype("int32")
    one_hot_v2("one_hot_v2_3", data, num_classes, is_tensor=True)


if __name__ == "__main__":
    main()
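A usage note, inferred from sys.argv[1] and the save_model helper rather than stated in the diff: the script is meant to be run by the paddle frontend test-model generation step with the output directory as its first argument, roughly `python generate_one_hot_v2.py <target_dir>`, which saves the three models referenced in the test list above.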