diff --git a/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp b/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp index 7b126c33864..f25b2e6fc64 100644 --- a/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp +++ b/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp @@ -87,6 +87,27 @@ static const std::vector models{std::string("argmax"), std::string("elementwise_mul1"), std::string("elementwise_pow1"), std::string("elementwise_sub1"), + std::string("elementwise_add2"), + std::string("elementwise_div2"), + std::string("elementwise_max2"), + std::string("elementwise_min2"), + std::string("elementwise_mul2"), + std::string("elementwise_pow2"), + std::string("elementwise_sub2"), + std::string("elementwise_add3"), + std::string("elementwise_div3"), + std::string("elementwise_max3"), + std::string("elementwise_min3"), + std::string("elementwise_mul3"), + std::string("elementwise_pow3"), + std::string("elementwise_sub3"), + std::string("elementwise_add4"), + std::string("elementwise_div4"), + std::string("elementwise_max4"), + std::string("elementwise_min4"), + std::string("elementwise_mul4"), + std::string("elementwise_pow4"), + std::string("elementwise_sub4"), std::string("embedding_0"), std::string("embedding_sparse"), std::string("embedding_none_weight"), diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py index 13a08af86ca..f1592434631 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py +++ b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py @@ -6,14 +6,14 @@ import sys from save_model import saveModel -def elementwise_add(name : str, x, y, in_dtype): +def elementwise_add(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = 
pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = pdpd.fluid.layers.nn.elementwise_add(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -28,14 +28,14 @@ def elementwise_add(name : str, x, y, in_dtype): return outs[0] -def elementwise_sub(name : str, x, y, in_dtype): +def elementwise_sub(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = pdpd.fluid.layers.nn.elementwise_sub(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -50,14 +50,14 @@ def elementwise_sub(name : str, x, y, in_dtype): return outs[0] -def elementwise_div(name : str, x, y, in_dtype): +def elementwise_div(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_div(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -72,14 +72,14 @@ def elementwise_div(name : str, x, y, in_dtype): return outs[0] -def elementwise_mul(name : str, x, y, in_dtype): +def elementwise_mul(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype 
= in_dtype) node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_mul(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -94,14 +94,14 @@ def elementwise_mul(name : str, x, y, in_dtype): return outs[0] -def elementwise_min(name : str, x, y, in_dtype): +def elementwise_min(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_min(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -116,14 +116,14 @@ def elementwise_min(name : str, x, y, in_dtype): return outs[0] -def elementwise_max(name : str, x, y, in_dtype): +def elementwise_max(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_max(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -138,14 +138,14 @@ def elementwise_max(name : str, x, y, in_dtype): return outs[0] -def elementwise_pow(name : str, x, y, in_dtype): +def elementwise_pow(name : str, x, y, axis, in_dtype): import paddle as pdpd pdpd.enable_static() with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) node_y = 
pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_pow(node_x, node_y) + out = pdpd.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis) cpu = pdpd.static.cpu_places(1) exe = pdpd.static.Executor(cpu[0]) @@ -159,21 +159,36 @@ def elementwise_pow(name : str, x, y, in_dtype): return outs[0] +def elementwise_ops(name : str, data_x, data_y, axis, in_dtype): + elementwise_add("elementwise_add" + name, data_x, data_y, axis, in_dtype) + elementwise_sub("elementwise_sub" + name, data_x, data_y, axis, in_dtype) + elementwise_div("elementwise_div" + name, data_x, data_y, axis, in_dtype) + elementwise_mul("elementwise_mul" + name, data_x, data_y, axis, in_dtype) + elementwise_min("elementwise_min" + name, data_x, data_y, axis, in_dtype) + elementwise_max("elementwise_max" + name, data_x, data_y, axis, in_dtype) + elementwise_pow("elementwise_pow" + name, data_x, data_y, axis, in_dtype) + def main(): in_dtype = 'float32' data_x = np.array([2, 3, 4]).astype(in_dtype) data_y = np.array([1, 5, 2]).astype(in_dtype) + axis = -1 + elementwise_ops("1", data_x, data_y, axis, in_dtype) - elementwise_add("elementwise_add1", data_x, data_y, in_dtype) - elementwise_sub("elementwise_sub1", data_x, data_y, in_dtype) - elementwise_div("elementwise_div1", data_x, data_y, in_dtype) - elementwise_mul("elementwise_mul1", data_x, data_y, in_dtype) - elementwise_min("elementwise_min1", data_x, data_y, in_dtype) - elementwise_max("elementwise_max1", data_x, data_y, in_dtype) - elementwise_pow("elementwise_pow1", data_x, data_y, in_dtype) + # data_y's shape is the continuous subsequence of data_x's shape + data_x = np.random.rand(2, 5, 3, 4).astype(np.float32) + data_y = (0.1 + np.random.rand(3, 4).astype(np.float32)) / 1.1 + elementwise_ops("2", data_x, data_y, axis, in_dtype) + data_y = (0.1 + np.random.rand(5).astype(np.float32)) / 1.1 + axis = 1 + elementwise_ops("3", data_x, data_y, axis, in_dtype) + + data_y = (0.1 + np.random.rand(2, 
5, 3).astype(np.float32)) / 1.1
+    axis = 0
+    elementwise_ops("4", data_x, data_y, axis, in_dtype)
 
 
 if __name__ == "__main__":
     main()
diff --git a/src/frontends/paddlepaddle/CMakeLists.txt b/src/frontends/paddlepaddle/CMakeLists.txt
index 71c82e3cdc7..b3d46d67d1a 100644
--- a/src/frontends/paddlepaddle/CMakeLists.txt
+++ b/src/frontends/paddlepaddle/CMakeLists.txt
@@ -5,4 +5,5 @@ ov_add_frontend(NAME paddlepaddle
                 LINKABLE_FRONTEND
                 PROTOBUF_LITE
-                FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format")
+                FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format"
+                LINK_LIBRARIES inference_engine_transformations)
diff --git a/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp b/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp
index d1b7689d561..7155b896514 100644
--- a/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp
+++ b/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp
@@ -6,6 +6,7 @@
 #include "node_context.hpp"
 #include "openvino/opsets/opset6.hpp"
+#include "transformations/utils/utils.hpp"
 
 namespace ov {
 namespace frontend {
@@ -64,28 +65,21 @@ std::pair<ov::CoordinateDiff, ov::CoordinateDiff> get_pads(const NodeContext& node) {
     return get_pads(node, data_spatial_dims);
 }
 
 std::shared_ptr<Node> get_reshaped_filter(const Output<Node>& filters, const int32_t groups) {
-    auto shape_of_filters = std::make_shared<opset6::ShapeOf>(filters);
+    /* filters' layout is [O,I,W,H].
+     * Divide O with groups:
+     *     grouped_O = O / groups
+     * The final grouped filters' layout is [groups, grouped_O, I, W, H]
+     */
+    const std::vector<int64_t> o_indices{0};
+    auto filter_o_node = ngraph::op::util::node_to_get_shape_value_of_indices_from_shape_source(filters, o_indices);
 
-    auto num_begin = opset6::Constant::create(element::i64, Shape{1}, {0});
-    auto num_end = opset6::Constant::create(element::i64, Shape{1}, {1});
-    auto num_node = std::make_shared<opset6::StridedSlice>(shape_of_filters,
-                                                           num_begin,
-                                                           num_end,
-                                                           std::vector<int64_t>{0},
-                                                           std::vector<int64_t>{0});
-
-    auto hw_begin = opset6::Constant::create(element::i64, Shape{1}, {1});
-    auto hw_end = opset6::Constant::create(element::i64, Shape{1}, {4});
-    auto filter_hw_node = std::make_shared<opset6::StridedSlice>(shape_of_filters,
-                                                                 hw_begin,
-                                                                 hw_end,
-                                                                 std::vector<int64_t>{0},
-                                                                 std::vector<int64_t>{0});
+    const std::vector<int64_t> ihw_indices{1, 2, 3};
+    auto filter_ihw_node = ngraph::op::util::node_to_get_shape_value_of_indices_from_shape_source(filters, ihw_indices);
 
     auto groups_node = opset6::Constant::create(element::i64, Shape{1}, {groups});
-    auto grouped_num_node = std::make_shared<opset6::Divide>(num_node, groups_node);
+    auto grouped_o_node = std::make_shared<opset6::Divide>(filter_o_node, groups_node);
 
     auto target_filter_shape =
-        std::make_shared<opset6::Concat>(OutputVector{groups_node, grouped_num_node, filter_hw_node}, 0);
+        std::make_shared<opset6::Concat>(OutputVector{groups_node, grouped_o_node, filter_ihw_node}, 0);
     return std::make_shared<opset6::Reshape>(filters, target_filter_shape, false);
 }
diff --git a/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp b/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp
index 7dcf4a62a11..d62707771fd 100644
--- a/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp
+++ b/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp
@@ -5,7 +5,7 @@
 #include <map>
 #include <string>
 
-#include "openvino/opsets/opset6.hpp"
+#include "default_opset.hpp"
 
 namespace ov {
 namespace frontend {
@@ -26,58 +26,54 @@ NamedOutputs elementwise_ops(const NodeContext& node) {
     if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank)) {
         return node.default_single_output_mapping({std::make_shared<T>(x, y)}, {"Out"});
     } else {
-        // This broadcast can be implemented by either ov::Reshape or
-        // ov::Broadcast. Since PDPD implicates y_shape is a subsequence of
-        // x_shape starting from axis, to use ov::Reshape like Paddle2ONNX,
-        // which is more friendly to PnP.
-        auto broadcast_shape = std::vector<int64_t>(x_rank, 1);
-        PartialShape y_shape = y.get_partial_shape();
-        int32_t i = 0;
-        for (auto it = y_shape.begin(); it != y_shape.end(); ++i, ++it)
-            broadcast_shape[axis + i] = (*it).get_length();
+        std::vector<int64_t> indices;
+        for (int64_t i = 0; i < axis; i++)
+            indices.push_back(i);
+        for (int64_t i = y_rank + axis; i < x_rank; i++)
+            indices.push_back(i);
 
-        auto reshape_node =
-            ov::opset6::Constant::create(ov::element::i64, ov::Shape{broadcast_shape.size()}, broadcast_shape);
-        auto y_node = std::make_shared<ov::opset6::Reshape>(y, reshape_node, false);
+        auto indices_node =
+            default_opset::Constant::create(ngraph::element::i64, ngraph::Shape{indices.size()}, indices);
+        auto y_node = std::make_shared<default_opset::Unsqueeze>(y, indices_node);
 
         return node.default_single_output_mapping({std::make_shared<T>(x, y_node)}, {"Out"});
     }
 }
 
 //
 NamedOutputs elementwise_add(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Add>(node_context);
+    return elementwise_ops<default_opset::Add>(node_context);
 }
 
 NamedOutputs elementwise_sub(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Subtract>(node_context);
+    return elementwise_ops<default_opset::Subtract>(node_context);
 }
 
 NamedOutputs elementwise_mul(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Multiply>(node_context);
+    return elementwise_ops<default_opset::Multiply>(node_context);
 }
 
 NamedOutputs elementwise_div(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Divide>(node_context);
+    return elementwise_ops<default_opset::Divide>(node_context);
 }
 
 NamedOutputs elementwise_min(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Minimum>(node_context);
+    return elementwise_ops<default_opset::Minimum>(node_context);
 }
 
 NamedOutputs elementwise_max(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Maximum>(node_context);
+    return elementwise_ops<default_opset::Maximum>(node_context);
 }
 
 NamedOutputs elementwise_pow(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Power>(node_context);
+    return elementwise_ops<default_opset::Power>(node_context);
 }
 
 NamedOutputs elementwise_equal(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::Equal>(node_context);
+    return elementwise_ops<default_opset::Equal>(node_context);
 }
 
 NamedOutputs elementwise_greater_equal(const NodeContext& node_context) {
-    return elementwise_ops<ov::opset6::GreaterEqual>(node_context);
+    return elementwise_ops<default_opset::GreaterEqual>(node_context);
 }
 
 }  // namespace op