【PaddlePaddle Hackathon 4】add paddle linspace op (#15835)

* update op linspace

* rewrite function name

* adjust op test

* adjust op test

* adjust op test

* format fuzzy

* remove annotations

* add support for int64

* remove umap

---------

Co-authored-by: cecilia peng <cecilia.peng@intel.com>
This commit is contained in:
NetPunk 2023-05-19 14:34:06 +08:00 committed by GitHub
parent 2680e9b7aa
commit 2e5468646c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 116 additions and 0 deletions

View File

@ -0,0 +1,53 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/frontend/paddle/visibility.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
// Translate Paddle's linspace op: produce `Num` evenly spaced values
// from `Start` to `Stop` (inclusive), cast to the requested `dtype`.
NamedOutputs linspace(const NodeContext& node) {
    const auto start = node.get_input("Start");
    const auto stop = node.get_input("Stop");
    const auto num = node.get_input("Num");
    const auto dtype = node.get_attribute<ov::element::Type>("dtype", element::f32);

    // Do the arithmetic in f32 regardless of the input element types.
    const auto start_f = std::make_shared<default_opset::Convert>(start, element::f32);
    const auto stop_f = std::make_shared<default_opset::Convert>(stop, element::f32);

    // step = (stop - start) / max(num - 1, 1); clamping the divisor to at
    // least 1 avoids a division by zero when num == 1.
    const auto one = std::make_shared<default_opset::Constant>(element::i32, Shape{}, 1);
    Output<Node> divisor = std::make_shared<default_opset::Subtract>(num, one);
    const auto keep_divisor = std::make_shared<default_opset::Greater>(divisor, one);
    divisor = std::make_shared<default_opset::Select>(keep_divisor, divisor, one);
    divisor = std::make_shared<default_opset::Convert>(divisor, element::f32);
    const auto span = std::make_shared<default_opset::Subtract>(stop_f, start_f);
    const auto step = std::make_shared<default_opset::Divide>(span, divisor);

    // indices = [0, 1, ..., num - 1] as f32 (Range needs scalar bounds).
    const auto zero = std::make_shared<default_opset::Constant>(element::i32, Shape{}, 0);
    const auto num_scalar = std::make_shared<default_opset::Squeeze>(num);
    const auto indices = std::make_shared<default_opset::Range>(zero, num_scalar, one, element::f32);

    // result = start + indices * step, then convert to the requested dtype
    // for the integral outputs Paddle supports here (i32 / i64).
    const auto scaled = std::make_shared<default_opset::Multiply>(indices, step);
    Output<Node> result = std::make_shared<default_opset::Add>(scaled, start_f);
    if (dtype == element::i32 || dtype == element::i64) {
        result = std::make_shared<default_opset::Convert>(result, dtype);
    }
    return node.default_single_output_mapping({result.get_node_shared_ptr()}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@ -58,6 +58,7 @@ OP_CONVERTER(layer_norm);
OP_CONVERTER(leaky_relu);
OP_CONVERTER(less_than);
OP_CONVERTER(linear_interp_v2);
OP_CONVERTER(linspace);
OP_CONVERTER(lod_array_length);
OP_CONVERTER(log);
OP_CONVERTER(logical_and);
@ -173,6 +174,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"leaky_relu", op::leaky_relu},
{"less_than", op::less_than},
{"linear_interp_v2", op::linear_interp_v2},
{"linspace", op::linspace},
{"lod_array_length", op::lod_array_length},
{"log", op::log},
{"logical_and", op::logical_and},

View File

@ -259,6 +259,9 @@ static const std::vector<std::string> models{
std::string("linear_upsample_scales/linear_upsample_scales.pdmodel"),
std::string("linear_upsample_scales2/linear_upsample_scales2.pdmodel"),
std::string("linear_upsample_true_0/linear_upsample_true_0.pdmodel"),
std::string("linspace_1"),
std::string("linspace_2"),
std::string("linspace_3"),
std::string("log"),
std::string("logical_and"),
std::string("logical_not"),

View File

@ -0,0 +1,58 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# linspace paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle
import random
import sys
# Default dtype tag for generated models.
# NOTE(review): appears unused within this file — confirm before removing.
data_type = "float32"
def linspace(name: str, start, stop, num, type='float32'):
    """Build, execute and save a static paddle model around paddle.linspace.

    name  -- directory/model name passed to saveModel
    start -- numpy array of shape [1]: first value of the sequence
    stop  -- numpy array of shape [1]: last value of the sequence
    num   -- numpy int32 array of shape [1]: number of elements to generate
    type  -- dtype requested from paddle.linspace for the output

    Returns the computed output tensor of the model.
    """
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_start = paddle.static.data(name="Start", shape=[1], dtype=start.dtype)
        node_stop = paddle.static.data(name="Stop", shape=[1], dtype=stop.dtype)
        node_num = paddle.static.data(name="Num", shape=[1], dtype='int32')
        out = paddle.linspace(node_start, node_stop, node_num, dtype=type)

        place = paddle.static.cpu_places(1)[0]
        executor = paddle.static.Executor(place)
        executor.run(paddle.static.default_startup_program())
        outs = executor.run(feed={"Start": start, "Stop": stop, "Num": num}, fetch_list=[out])
        saveModel(name, executor, feedkeys=["Start", "Stop", "Num"], fetchlist=[out],
                  inputs=[start, stop, num], outputs=[outs[0]], target_dir=sys.argv[1])
    return outs[0]
def main():
    """Generate the three linspace test models consumed by the frontend tests."""
    # Case 1: random float32 endpoints, random element count in [1, 5).
    rand_start = np.random.randn(1).astype(np.float32)
    rand_stop = np.random.randn(1).astype(np.float32)
    rand_num = np.random.randint(1, 5, size=1).astype(np.int32)
    linspace("linspace_1", rand_start, rand_stop, rand_num, "float32")

    # Case 2: int32 endpoints cast to a float32 output.
    linspace("linspace_2",
             np.array([0]).astype(np.int32),
             np.array([1]).astype(np.int32),
             np.array([4]).astype(np.int32),
             "float32")

    # Case 3: int64 endpoints, start < stop, minimal num == 1, int64 output.
    linspace("linspace_3",
             np.array([-5]).astype(np.int64),
             np.array([-4]).astype(np.int64),
             np.array([1]).astype(np.int32),
             "int64")
# Entry point: generate all linspace models when run as a script
# (target directory is taken from sys.argv[1] inside linspace()).
if __name__ == "__main__":
    main()