【PaddlePaddle Hackathon 3】Add Paddle sum operator (#12545)
* add paddle op sum * new version * add unit test for other precision * change layer.sum to add_n * remove redundant code Co-authored-by: Bo Liu <bo4.liu@intel.com> Co-authored-by: cecilia peng <cecilia.peng@intel.com>
This commit is contained in:
@@ -405,6 +405,10 @@ static const std::vector<std::string> models{
|
||||
std::string("strided_slice_input2_3"),
|
||||
std::string("strided_slice_input3_1"),
|
||||
std::string("strided_slice_input3_2"),
|
||||
std::string("sum_1"),
|
||||
std::string("sum_2"),
|
||||
std::string("sum_3"),
|
||||
std::string("sum_4"),
|
||||
std::string("swish_default_params"),
|
||||
std::string("swish_beta"),
|
||||
std::string("tanh"),
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
from __future__ import print_function
|
||||
import sys
|
||||
import numpy as np
|
||||
import paddle
|
||||
|
||||
from save_model import saveModel
|
||||
|
||||
|
||||
def sum_(name: str, input):
    """Build, run, and save a paddle model that applies add_n to a single tensor.

    name  -- model name used as the save-directory suffix.
    input -- numpy array fed to the model; its shape/dtype define the placeholder.
    Returns the computed output tensor (outs[0]).
    """
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node = paddle.static.data('data', shape=input.shape, dtype=input.dtype)
        out = paddle.add_n(node)

        place = paddle.static.cpu_places(1)[0]
        exe = paddle.static.Executor(place)
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(feed={"data": input}, fetch_list=[out])
        saveModel(name, exe, feedkeys=["data"], fetchlist=[out],
                  inputs=[input], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]
|
||||
|
||||
|
||||
def sum(name: str, inputs):
    """Build, run, and save a paddle model that applies add_n to three tensors.

    name   -- model name used as the save-directory suffix.
    inputs -- sequence of exactly three numpy arrays of identical shape/dtype.
    Returns the computed output tensor (outs[0]).
    """
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        feed_names = ["data0", "data1", "data2"]
        placeholders = [
            paddle.static.data(key, shape=tensor.shape, dtype=tensor.dtype)
            for key, tensor in zip(feed_names, inputs)
        ]
        out = paddle.add_n(placeholders)

        place = paddle.static.cpu_places(1)[0]
        exe = paddle.static.Executor(place)
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(feed=dict(zip(feed_names, inputs)), fetch_list=[out])
        saveModel(name, exe, feedkeys=feed_names, fetchlist=[out],
                  inputs=[inputs[0], inputs[1], inputs[2]], outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
|
||||
|
||||
|
||||
def main():
    """Generate the sum_1..sum_4 reference models consumed by the frontend tests.

    Covers: a single-tensor add_n, and three-tensor add_n for float32,
    int32, and int64 inputs.
    """
    # single tensor
    input = np.random.random([2, 3]).astype(np.float32)
    sum_("sum_1", input)

    # multiple tensors with type float32
    input1 = np.random.random([2, 3]).astype(np.float32)
    input2 = np.random.random([2, 3]).astype(np.float32)
    input3 = np.random.random([2, 3]).astype(np.float32)
    sum("sum_2", [input1, input2, input3])

    # multiple tensors with type int32
    # NOTE: np.random.random() yields floats in [0, 1), so astype(int) would
    # truncate every element to 0 and the integer models would only ever sum
    # zeros; use randint so they exercise non-trivial values.
    input1 = np.random.randint(0, 10, [2, 3]).astype(np.int32)
    input2 = np.random.randint(0, 10, [2, 3]).astype(np.int32)
    input3 = np.random.randint(0, 10, [2, 3]).astype(np.int32)
    sum("sum_3", [input1, input2, input3])

    # multiple tensors with type int64
    input1 = np.random.randint(0, 10, [2, 3]).astype(np.int64)
    input2 = np.random.randint(0, 10, [2, 3]).astype(np.int64)
    input3 = np.random.randint(0, 10, [2, 3]).astype(np.int64)
    sum("sum_4", [input1, input2, input3])
|
||||
|
||||
|
||||
# Script entry point: generate all sum test models (target dir from sys.argv[1]).
if __name__ == "__main__":
    main()
|
||||
24
src/frontends/paddle/src/op/sum.cpp
Normal file
24
src/frontends/paddle/src/op/sum.cpp
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright (C) 2018-2022 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include "default_opset.hpp"
|
||||
#include "openvino/frontend/paddle/node_context.hpp"
|
||||
|
||||
namespace ov {
|
||||
namespace frontend {
|
||||
namespace paddle {
|
||||
namespace op {
|
||||
// Converts the Paddle "sum" op (add_n): element-wise sum of every tensor in
// the input list "X", lowered as a left fold of binary Add nodes:
// ((x0 + x1) + x2) + ...
NamedOutputs sum(const NodeContext& node) {
    const auto data = node.get_ng_inputs("X");
    // Use size_t for the index to match data.size() (avoids a signed/unsigned
    // comparison), and avoid shadowing the enclosing function's name.
    auto result = data[0].get_node_shared_ptr();
    for (size_t i = 1; i < data.size(); ++i) {
        result = std::make_shared<default_opset::Add>(result, data[i]);
    }
    return node.default_single_output_mapping({result}, {"Out"});
}
|
||||
|
||||
} // namespace op
|
||||
} // namespace paddle
|
||||
} // namespace frontend
|
||||
} // namespace ov
|
||||
@@ -88,6 +88,7 @@ OP_CONVERTER(sqrt);
|
||||
OP_CONVERTER(squeeze);
|
||||
OP_CONVERTER(stack);
|
||||
OP_CONVERTER(strided_slice);
|
||||
OP_CONVERTER(sum);
|
||||
OP_CONVERTER(swish);
|
||||
OP_CONVERTER(tanh);
|
||||
OP_CONVERTER(tile);
|
||||
@@ -187,6 +188,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
|
||||
{"squeeze2", op::squeeze},
|
||||
{"stack", op::stack},
|
||||
{"strided_slice", op::strided_slice},
|
||||
{"sum", op::sum},
|
||||
{"swish", op::swish},
|
||||
{"sync_batch_norm", op::batch_norm},
|
||||
{"tanh", op::tanh},
|
||||
|
||||
Reference in New Issue
Block a user