diff --git a/src/frontends/paddle/src/op/linspace.cpp b/src/frontends/paddle/src/op/linspace.cpp
new file mode 100644
index 00000000000..47c1be6c509
--- /dev/null
+++ b/src/frontends/paddle/src/op/linspace.cpp
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+#include "openvino/frontend/paddle/visibility.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+NamedOutputs linspace(const NodeContext& node) {
+    auto start = node.get_input("Start");
+    auto stop = node.get_input("Stop");
+    auto num = node.get_input("Num");
+    auto dtype = node.get_attribute<ov::element::Type>("dtype", element::f32);
+
+    start = std::make_shared<default_opset::Convert>(start, element::f32);
+    stop = std::make_shared<default_opset::Convert>(stop, element::f32);
+
+    // compute step value, i.e. distance between neighbor values of the result
+    Output<Node> step = std::make_shared<default_opset::Subtract>(stop, start);  //[-1]
+    auto const_one = std::make_shared<default_opset::Constant>(element::i32, Shape{}, 1);
+    Output<Node> num_minus_one = std::make_shared<default_opset::Subtract>(num, const_one);  //[3]
+    auto num_none_zero = std::make_shared<default_opset::GreaterEqual>(num_minus_one, const_one);  //[true]
+    num_minus_one = std::make_shared<default_opset::Select>(num_none_zero, num_minus_one, const_one);
+
+    num_minus_one = std::make_shared<default_opset::Convert>(num_minus_one, element::f32);
+    step = std::make_shared<default_opset::Divide>(step, num_minus_one);  //[-1/3]
+
+    // generate a range of numbers [0, 1, ..., num)
+    auto const_zero = std::make_shared<default_opset::Constant>(element::i32, Shape{}, 0);
+    auto const_num = std::make_shared<default_opset::Squeeze>(num);
+    auto range0_n = std::make_shared<default_opset::Range>(const_zero, const_num, const_one, element::f32);
+
+    // compute the result
+    Output<Node> linspace = std::make_shared<default_opset::Multiply>(range0_n, step);
+    auto result = std::make_shared<default_opset::Add>(linspace, start);
+    if (dtype == element::i32) {
+        return node.default_single_output_mapping({std::make_shared<default_opset::Convert>(result, element::i32)},
+                                                  {"Out"});
+    } else if (dtype == element::i64) {
+        return node.default_single_output_mapping({std::make_shared<default_opset::Convert>(result, element::i64)},
+                                                  {"Out"});
+    } else {
+        return node.default_single_output_mapping({result}, {"Out"});
+    }
+}
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index 5fae145cc30..22c907fb800 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -58,6 +58,7 @@ OP_CONVERTER(layer_norm);
 OP_CONVERTER(leaky_relu);
 OP_CONVERTER(less_than);
 OP_CONVERTER(linear_interp_v2);
+OP_CONVERTER(linspace);
 OP_CONVERTER(lod_array_length);
 OP_CONVERTER(log);
 OP_CONVERTER(logical_and);
@@ -173,6 +174,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
             {"leaky_relu", op::leaky_relu},
             {"less_than", op::less_than},
             {"linear_interp_v2", op::linear_interp_v2},
+            {"linspace", op::linspace},
             {"lod_array_length", op::lod_array_length},
             {"log", op::log},
             {"logical_and", op::logical_and},
diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp
index 67551c3334a..f71e0524b17 100644
--- a/src/frontends/paddle/tests/op_fuzzy.cpp
+++ b/src/frontends/paddle/tests/op_fuzzy.cpp
@@ -259,6 +259,9 @@ static const std::vector<std::string> models{
     std::string("linear_upsample_scales/linear_upsample_scales.pdmodel"),
     std::string("linear_upsample_scales2/linear_upsample_scales2.pdmodel"),
     std::string("linear_upsample_true_0/linear_upsample_true_0.pdmodel"),
+    std::string("linspace_1"),
+    std::string("linspace_2"),
+    std::string("linspace_3"),
     std::string("log"),
     std::string("logical_and"),
     std::string("logical_not"),
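Note on the decomposition above: rather than mapping Paddle's linspace to a single OpenVINO node, the converter builds it from elementwise ops as start + Range(0, num) * (stop - start) / (num - 1), with the Select falling back to a divisor of 1 so that num == 1 does not divide by zero. A minimal NumPy sketch of the same arithmetic, checked against np.linspace (the helper name linspace_decomposed is illustrative only, not part of the patch):

    import numpy as np

    def linspace_decomposed(start, stop, num):
        # everything is computed in float32, as the converter does after the initial Converts
        start = np.float32(start)
        stop = np.float32(stop)
        denom = num - 1 if num > 1 else 1          # guard mirrored by the Select node
        step = (stop - start) / np.float32(denom)  # Subtract + Divide
        return start + np.arange(num, dtype=np.float32) * step  # Range + Multiply + Add

    assert np.allclose(linspace_decomposed(0.0, 1.0, 4), np.linspace(0.0, 1.0, 4))
    assert np.allclose(linspace_decomposed(-5.0, -4.0, 1), np.linspace(-5.0, -4.0, 1))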
std::string("logical_not"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_linspace.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_linspace.py new file mode 100644 index 00000000000..d4c636b64e5 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_linspace.py @@ -0,0 +1,58 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# linspace paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import random +import sys + +data_type = "float32" + + +def linspace(name: str, start, stop, num, type='float32'): + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_start = paddle.static.data(name="Start", shape=[1], dtype=start.dtype) + data_stop = paddle.static.data(name="Stop", shape=[1], dtype=stop.dtype) + data_num = paddle.static.data(name="Num", shape=[1], dtype='int32') + + out = paddle.linspace(data_start, data_stop, data_num, dtype=type) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + exe.run(paddle.static.default_startup_program()) + + outs = exe.run(feed={"Start": start, "Stop": stop, "Num": num}, fetch_list=[out]) + + saveModel(name, exe, feedkeys=["Start", "Stop", "Num"], fetchlist=[out], inputs=[start, stop, num], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + # random test, float32 + start = np.random.randn(1).astype(np.float32) + stop = np.random.randn(1).astype(np.float32) + num = np.random.randint(1, 5, size=1).astype(np.int32) + linspace("linspace_1", start, stop, num, "float32") + # int32 to float32 + start = np.array([0]).astype(np.int32) + stop = np.array([1]).astype(np.int32) + num = np.array([4]).astype(np.int32) + linspace("linspace_2", start, stop, num, "float32") + # int64, start less than stop, minimal num = 1 + start = np.array([-5]).astype(np.int64) + stop = np.array([-4]).astype(np.int64) + num = np.array([1]).astype(np.int32) + linspace("linspace_3", start, stop, num, "int64") + + +if __name__ == "__main__": + main()