diff --git a/src/core/tests/frontend/paddle/op_fuzzy.cpp b/src/core/tests/frontend/paddle/op_fuzzy.cpp index 5fbd866b4d4..4161024ccaa 100644 --- a/src/core/tests/frontend/paddle/op_fuzzy.cpp +++ b/src/core/tests/frontend/paddle/op_fuzzy.cpp @@ -155,7 +155,10 @@ static const std::vector models{std::string("argmax"), std::string("linear_upsample_scales2"), std::string("linear_upsample_true_0"), std::string("log"), + std::string("logical_and"), std::string("logical_not"), + std::string("logical_or"), + std::string("logical_xor"), std::string("matmul_xt"), std::string("matmul_xt_yt"), std::string("matmul_yt"), @@ -198,6 +201,36 @@ static const std::vector models{std::string("argmax"), std::string("range0"), std::string("range1"), std::string("range2"), + std::string("reduce_max_test_0"), + std::string("reduce_max_test_1"), + std::string("reduce_max_test_2"), + std::string("reduce_max_test_3"), + std::string("reduce_max_test_4"), + std::string("reduce_max_test_5"), + std::string("reduce_mean_test_0"), + std::string("reduce_mean_test_1"), + std::string("reduce_mean_test_2"), + std::string("reduce_mean_test_3"), + std::string("reduce_mean_test_4"), + std::string("reduce_mean_test_5"), + std::string("reduce_min_test_0"), + std::string("reduce_min_test_1"), + std::string("reduce_min_test_2"), + std::string("reduce_min_test_3"), + std::string("reduce_min_test_4"), + std::string("reduce_min_test_5"), + std::string("reduce_prod_test_0"), + std::string("reduce_prod_test_1"), + std::string("reduce_prod_test_2"), + std::string("reduce_prod_test_3"), + std::string("reduce_prod_test_4"), + std::string("reduce_prod_test_5"), + std::string("reduce_sum_test_0"), + std::string("reduce_sum_test_1"), + std::string("reduce_sum_test_2"), + std::string("reduce_sum_test_3"), + std::string("reduce_sum_test_4"), + std::string("reduce_sum_test_5"), std::string("relu"), std::string("relu6"), std::string("relu6_1"), diff --git 
a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_and.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_and.py new file mode 100644 index 00000000000..71589293e4d --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_and.py @@ -0,0 +1,50 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# logical_and paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + +def equal_logical_and(name : str, x, y, z): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') + node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32') + + bool_x = paddle.equal(node_x, node_y) + bool_y = paddle.equal(node_x, node_z) + + out = paddle.logical_and(bool_x, bool_y) + out = paddle.cast(out, x.dtype) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y, 'z': z}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out], + inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) + data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32) + + equal_logical_and("logical_and", data_x, data_y, data_z) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py index 485a911021a..8078cdb8ddc 100644 --- a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# pool2d paddle model generator +# logical_not paddle model generator # import numpy as np from save_model import saveModel @@ -13,24 +13,25 @@ def equal_logical_not(name : str, x, y): import paddle paddle.enable_static() - node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') - node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') - out = paddle.equal(node_x, node_y) - out = paddle.logical_not(out) - out = paddle.cast(out, np.float32) + out = paddle.equal(node_x, node_y) + out = paddle.logical_not(out) + out = paddle.cast(out, np.float32) - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
- exe.run(paddle.static.default_startup_program()) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) - outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) + outs = exe.run( + feed={'x': x, 'y': y}, + fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], - inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], + inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_or.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_or.py new file mode 100644 index 00000000000..a3f00e1e483 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_or.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# logical_or paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def equal_logical_or(name : str, x, y, z): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') + node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32') + + bool_x = paddle.equal(node_x, node_y) + bool_y = paddle.equal(node_x, node_z) + + out = paddle.logical_or(bool_x, bool_y) + out = paddle.cast(out, x.dtype) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y, 'z': z}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out], + inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) + data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32) + + equal_logical_or("logical_or", data_x, data_y, data_z) + + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_xor.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_xor.py new file mode 100644 index 00000000000..cefe58454ff --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_xor.py @@ -0,0 +1,52 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# logical_xor paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def equal_logical_xor(name : str, x, y, z): + import paddle +​ paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') + node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32') + + bool_x = paddle.equal(node_x, node_y) + bool_y = paddle.equal(node_x, node_z) + + out = paddle.logical_xor(bool_x, bool_y) + out = paddle.cast(out, x.dtype) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'y': y, 'z': z}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out], + inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) + data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) + data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32) + + equal_logical_xor("logical_xor", data_x, data_y, data_z) + + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_max.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_max.py new file mode 100644 index 00000000000..83622e2f96d --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_max.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_max paddle model generator +# + +import numpy as np +import sys +from save_model import saveModel + + +def reduce_max(name : str, x, axis=None, keepdim=False): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.max(data_x, axis=axis, keepdim=keepdim) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32) + + reduce_max("reduce_max_test_0", data) + reduce_max("reduce_max_test_1", data, axis=0, keepdim=False) + reduce_max("reduce_max_test_2", data, axis=-1, keepdim=False) + reduce_max("reduce_max_test_3", data, axis=1, keepdim=True) + reduce_max("reduce_max_test_4", data, axis=[1,2], keepdim=False) + reduce_max("reduce_max_test_5", data, axis=[0,1], keepdim=True) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_mean.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_mean.py new file mode 100644 index 00000000000..1a73553a24f --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_mean.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_mean paddle model generator +# + +import numpy as np +import sys +from save_model import saveModel + + +def reduce_mean(name : str, x, axis=None, keepdim=False): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.mean(data_x, axis=axis, keepdim=keepdim) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32) + + reduce_mean("reduce_mean_test_0", data) + reduce_mean("reduce_mean_test_1", data, axis=0, keepdim=False) + reduce_mean("reduce_mean_test_2", data, axis=-1, keepdim=False) + reduce_mean("reduce_mean_test_3", data, axis=1, keepdim=True) + reduce_mean("reduce_mean_test_4", data, axis=[1,2], keepdim=False) + reduce_mean("reduce_mean_test_5", data, axis=[0,1], keepdim=True) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_min.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_min.py new file mode 100644 index 00000000000..fd7a989ddba --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_min.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_min paddle model generator +# + +import numpy as np +import sys +from save_model import saveModel + + +def reduce_min(name : str, x, axis=None, keepdim=False): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.min(data_x, axis=axis, keepdim=keepdim) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32) + + reduce_min("reduce_min_test_0", data) + reduce_min("reduce_min_test_1", data, axis=0, keepdim=False) + reduce_min("reduce_min_test_2", data, axis=-1, keepdim=False) + reduce_min("reduce_min_test_3", data, axis=1, keepdim=True) + reduce_min("reduce_min_test_4", data, axis=[1,2], keepdim=False) + reduce_min("reduce_min_test_5", data, axis=[0,1], keepdim=True) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_prod.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_prod.py new file mode 100644 index 00000000000..7c379216673 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_prod.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_prod paddle model generator +# + +import numpy as np +import sys +from save_model import saveModel + + +def reduce_prod(name : str, x, axis=None, keepdim=False): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.prod(data_x, axis=axis, keepdim=keepdim) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32) + + reduce_prod("reduce_prod_test_0", data) + reduce_prod("reduce_prod_test_1", data, axis=0, keepdim=False) + reduce_prod("reduce_prod_test_2", data, axis=-1, keepdim=False) + reduce_prod("reduce_prod_test_3", data, axis=1, keepdim=True) + reduce_prod("reduce_prod_test_4", data, axis=[1,2], keepdim=False) + reduce_prod("reduce_prod_test_5", data, axis=[0,1], keepdim=True) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_sum.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_sum.py new file mode 100644 index 00000000000..ac6c3a6dd95 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reduce_sum.py @@ -0,0 +1,46 @@ +# Copyright (C) 2018-2022 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# reduce_sum paddle model generator +# + +import numpy as np +import sys +from save_model import saveModel + + +def reduce_sum(name : str, x, axis=None, keepdim=False): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.sum(data_x, axis=axis, keepdim=keepdim) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([[[1.0,2.0], [3.0, 4.0]], [[5.0,6.0], [7.0, 8.0]]]).astype(np.float32) + + reduce_sum("reduce_sum_test_0", data) + reduce_sum("reduce_sum_test_1", data, axis=0, keepdim=False) + reduce_sum("reduce_sum_test_2", data, axis=-1, keepdim=False) + reduce_sum("reduce_sum_test_3", data, axis=1, keepdim=True) + reduce_sum("reduce_sum_test_4", data, axis=[1,2], keepdim=False) + reduce_sum("reduce_sum_test_5", data, axis=[0,1], keepdim=True) + + +if __name__ == "__main__": + main() diff --git a/src/frontends/paddle/src/op/logical_and.cpp b/src/frontends/paddle/src/op/logical_and.cpp new file mode 100644 index 00000000000..77e3c9b8b16 --- /dev/null +++ b/src/frontends/paddle/src/op/logical_and.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs logical_and(const NodeContext& node) { + auto x = node.get_input("X"); + auto y = node.get_input("Y"); + return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/logical_or.cpp b/src/frontends/paddle/src/op/logical_or.cpp new file mode 100644 index 00000000000..1857cc6a1e1 --- /dev/null +++ b/src/frontends/paddle/src/op/logical_or.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { 
+namespace op { +NamedOutputs logical_or(const NodeContext& node) { + auto x = node.get_input("X"); + auto y = node.get_input("Y"); + return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/logical_xor.cpp b/src/frontends/paddle/src/op/logical_xor.cpp new file mode 100644 index 00000000000..70cf05164e9 --- /dev/null +++ b/src/frontends/paddle/src/op/logical_xor.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs logical_xor(const NodeContext& node) { + auto x = node.get_input("X"); + auto y = node.get_input("Y"); + return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/reduce_max.cpp b/src/frontends/paddle/src/op/reduce_max.cpp new file mode 100644 index 00000000000..3c433379509 --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_max.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_max(const NodeContext& node_context) { + return reduce_ops(node_context); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/reduce_mean.cpp b/src/frontends/paddle/src/op/reduce_mean.cpp new file mode 100644 index 00000000000..f9a719fd91a --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_mean.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: 
Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_mean(const NodeContext& node_context) { + return reduce_ops(node_context); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/reduce_min.cpp b/src/frontends/paddle/src/op/reduce_min.cpp new file mode 100644 index 00000000000..d1c2e67b236 --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_min.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_min(const NodeContext& node_context) { + return reduce_ops(node_context); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/reduce_ops.hpp b/src/frontends/paddle/src/op/reduce_ops.hpp new file mode 100644 index 00000000000..076d81b2b01 --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_ops.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { + +template +NamedOutputs reduce_ops(const NodeContext& node) { + auto x = node.get_input("X"); + auto keep_dim = node.get_attribute("keep_dim"); + auto reduce_all = node.get_attribute("reduce_all", false); + PADDLE_OP_CHECK(node, x.get_partial_shape().rank().is_static(), "reduce_ops: X rank must be static!"); + int64_t input_rank = x.get_partial_shape().rank().get_length(); + std::vector dims(input_rank); + if (reduce_all) { + std::iota(dims.begin(), dims.end(), 0); + } else { + dims = node.get_attribute>("dim"); + } + auto axesNode = default_opset::Constant::create(ngraph::element::i32, 
{dims.size()}, dims); + return node.default_single_output_mapping({std::make_shared(x, axesNode, keep_dim)}, {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/reduce_prod.cpp b/src/frontends/paddle/src/op/reduce_prod.cpp new file mode 100644 index 00000000000..2724f52f66b --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_prod.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_prod(const NodeContext& node_context) { + return reduce_ops(node_context); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op/reduce_sum.cpp b/src/frontends/paddle/src/op/reduce_sum.cpp new file mode 100644 index 00000000000..bab84198d68 --- /dev/null +++ b/src/frontends/paddle/src/op/reduce_sum.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "reduce_ops.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs reduce_sum(const NodeContext& node_context) { + return reduce_ops(node_context); +} +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index bb7472285e7..770ed7adfc3 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -44,7 +44,10 @@ OP_CONVERTER(layer_norm); OP_CONVERTER(leaky_relu); OP_CONVERTER(linear_interp_v2); OP_CONVERTER(log); +OP_CONVERTER(logical_and); OP_CONVERTER(logical_not); +OP_CONVERTER(logical_or); +OP_CONVERTER(logical_xor); OP_CONVERTER(matmul); OP_CONVERTER(matmul_v2); OP_CONVERTER(matrix_nms); @@ -55,6 
+58,11 @@ OP_CONVERTER(pow); OP_CONVERTER(pool2d); OP_CONVERTER(prior_box); OP_CONVERTER(range); +OP_CONVERTER(reduce_max); +OP_CONVERTER(reduce_mean); +OP_CONVERTER(reduce_min); +OP_CONVERTER(reduce_prod); +OP_CONVERTER(reduce_sum); OP_CONVERTER(relu); OP_CONVERTER(relu6); OP_CONVERTER(reshape2); @@ -115,7 +123,10 @@ std::map get_supported_ops() { {"leaky_relu", op::leaky_relu}, {"linear_interp_v2", op::linear_interp_v2}, {"log", op::log}, + {"logical_and", op::logical_and}, {"logical_not", op::logical_not}, + {"logical_or", op::logical_or}, + {"logical_xor", op::logical_xor}, {"lookup_table_v2", op::embedding}, {"matmul", op::matmul}, {"matmul_v2", op::matmul_v2}, @@ -129,6 +140,11 @@ std::map get_supported_ops() { {"pool2d", op::pool2d}, {"prior_box", op::prior_box}, {"range", op::range}, + {"reduce_max", op::reduce_max}, + {"reduce_mean", op::reduce_mean}, + {"reduce_min", op::reduce_min}, + {"reduce_prod", op::reduce_prod}, + {"reduce_sum", op::reduce_sum}, {"relu", op::relu}, {"relu6", op::relu6}, {"reshape2", op::reshape2},