Enable Paddle DINO (#18111)

* Enable Paddle ops reduce_all and share_data; support the bool type for Paddle op elementwise_mul

* Support bool/int16 for fill_any_like
mei, yang 2023-08-02 09:24:28 +08:00 committed by GitHub
parent b6a32694a5
commit 8619d6d749
10 changed files with 191 additions and 12 deletions

View File

@@ -19,7 +19,11 @@ NamedOutputs elementwise_sub(const NodeContext& node_context) {
}

NamedOutputs elementwise_mul(const NodeContext& node_context) {
-    return elementwise_ops<default_opset::Multiply>(node_context);
+    auto x = node_context.get_input("X");
+    if (x.get_element_type() == ov::element::boolean)
+        return elementwise_ops<default_opset::LogicalAnd>(node_context);
+    else
+        return elementwise_ops<default_opset::Multiply>(node_context);
}

NamedOutputs elementwise_div(const NodeContext& node_context) {
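For boolean tensors, elementwise multiplication and logical AND agree element by element, which is what the LogicalAnd mapping above relies on. A minimal NumPy check (illustrative only, not part of this commit):

import numpy as np

x = np.array([True, True, False, False])
y = np.array([True, False, True, False])

# For bool operands, the elementwise product equals the elementwise AND.
assert np.array_equal(x * y, np.logical_and(x, y))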

View File

@@ -17,16 +17,13 @@ NamedOutputs fill_any_like(const NodeContext& node) {
        // when dtype is not defined, use the input type
        dtype = x.get_element_type();
    }
-    const std::vector<element::Type> supported_type = {element::i32,
-                                                       element::i64,
-                                                       element::f16,
-                                                       element::f32,
-                                                       element::f64};
+    const std::vector<element::Type> supported_type =
+        {element::boolean, element::i16, element::i32, element::i64, element::f16, element::f32, element::f64};
    const bool valid_type =
        std::any_of(supported_type.begin(), supported_type.end(), [dtype](const element::Type& type) {
            return dtype == type;
        });
-    PADDLE_OP_CHECK(node, valid_type, "fill_any_like only supports i32, i64, f16, f32, f64");
+    PADDLE_OP_CHECK(node, valid_type, "Invalid dtype! fill_any_like supports boolean, i16, i32, i64, f16, f32, f64");
    const auto value_node = default_opset::Constant::create(dtype, {1}, {value});
    const auto shape_node = std::make_shared<default_opset::ShapeOf>(x);
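fill_any_like creates a tensor of the input's shape filled with a constant; when dtype is omitted it inherits the input's type, which is why boolean inputs must now pass the check above. A NumPy analogue of the op's semantics (an illustrative sketch, not the converter itself):

import numpy as np

x = np.zeros((2, 3), dtype=bool)

# With no explicit dtype, the output takes the input's dtype and shape.
out = np.full_like(x, True)
assert out.dtype == np.bool_ and out.shape == (2, 3)

# With an explicit dtype, e.g. the newly supported int16, the value is cast.
out_i16 = np.full_like(x, 3, dtype=np.int16)
assert out_i16.dtype == np.int16 and out_i16[0, 0] == 3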

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "reduce_ops.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_all(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceLogicalAnd>(node_context);
}
}  // namespace op
}  // namespace paddle
}  // namespace frontend
}  // namespace ov
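reduce_all yields true only where every element along the reduced axes is true, so ReduceLogicalAnd is a direct counterpart. A NumPy cross-check of the semantics (illustrative only):

import numpy as np

data = np.array([[True, True],
                 [True, False]])

# AND-reduction over all axes, then over a single axis.
assert not np.all(data)
assert np.array_equal(np.all(data, axis=1), np.array([True, False]))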

View File

@@ -0,0 +1,24 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs share_data(const NodeContext& node) {
    auto x = node.get_input("X");
    PADDLE_OP_CHECK(node,
                    x.get_element_type() == node.get_out_port_type("Out"),
                    "Input and output type should be the same");
    NamedOutputs named_outputs;
    named_outputs["Out"] = OutputVector{x};
    return named_outputs;
}

}  // namespace op
}  // namespace paddle
}  // namespace frontend
}  // namespace ov
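share_data is effectively an identity: the output aliases the input, so the converter simply forwards the input tensor once the types match. A rough NumPy analogue (illustrative only):

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)

# A NumPy view shares the underlying buffer without copying,
# much like share_data; values and dtype are unchanged.
out = x.view()
assert out.dtype == x.dtype
assert np.array_equal(out, x)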

View File

@@ -81,6 +81,7 @@ OP_CONVERTER(pool2d);
OP_CONVERTER(prior_box);
OP_CONVERTER(quantize_linear);
OP_CONVERTER(range);
+OP_CONVERTER(reduce_all);
OP_CONVERTER(reduce_max);
OP_CONVERTER(reduce_mean);
OP_CONVERTER(reduce_min);
@@ -96,12 +97,13 @@ OP_CONVERTER(scale);
OP_CONVERTER(select_input);
OP_CONVERTER(set_value);
OP_CONVERTER(shape);
+OP_CONVERTER(share_data);
+OP_CONVERTER(sigmoid);
+OP_CONVERTER(silu);
OP_CONVERTER(slice);
OP_CONVERTER(softmax);
OP_CONVERTER(softplus);
OP_CONVERTER(softshrink);
-OP_CONVERTER(sigmoid);
-OP_CONVERTER(silu);
OP_CONVERTER(split);
OP_CONVERTER(sqrt);
OP_CONVERTER(squeeze);
@@ -205,6 +207,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"prior_box", op::prior_box},
{"quantize_linear", op::quantize_linear},
{"range", op::range},
{"reduce_all", op::reduce_all},
{"reduce_max", op::reduce_max},
{"reduce_mean", op::reduce_mean},
{"reduce_min", op::reduce_min},
@@ -220,12 +223,13 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"select_input", op::select_input},
{"set_value", op::set_value},
{"shape", op::shape},
{"share_data", op::share_data},
{"sigmoid", op::sigmoid},
{"silu", op::silu},
{"slice", op::slice},
{"softmax", op::softmax},
{"softplus", op::softplus},
{"softshrink", op::softshrink},
{"sigmoid", op::sigmoid},
{"silu", op::silu},
{"split", op::split},
{"sqrt", op::sqrt},
{"squeeze2", op::squeeze},

View File

@@ -175,6 +175,7 @@ static const std::vector<std::string> models{
std::string("elementwise_floordiv_int64_1"),
std::string("elementwise_floordiv_int64_2"),
std::string("elementwise_floordiv_int64_3"),
std::string("elementwise_mul_bool1"),
std::string("embedding_0/embedding_0.pdmodel"),
std::string("embedding_sparse/embedding_sparse.pdmodel"),
std::string("embedding_none_weight/embedding_none_weight.pdmodel"),
@@ -199,8 +200,11 @@ static const std::vector<std::string> models{
std::string("fill_any_like_f16"),
std::string("fill_any_like_f32"),
std::string("fill_any_like_f64"),
std::string("fill_any_like_i16"),
std::string("fill_any_like_i32"),
std::string("fill_any_like_i64"),
std::string("fill_any_like_bool"),
std::string("fill_any_like_bool_2"),
std::string("fill_constant"),
std::string("fill_constant_batch_size_like"),
std::string("fill_constant_int32"),
@@ -404,6 +408,12 @@ static const std::vector<std::string> models{
std::string("range0"),
std::string("range1"),
std::string("range2"),
std::string("reduce_all_test_0"),
std::string("reduce_all_test_1"),
std::string("reduce_all_test_2"),
std::string("reduce_all_test_3"),
std::string("reduce_all_test_4"),
std::string("reduce_all_test_5"),
std::string("reduce_max_test_0"),
std::string("reduce_max_test_1"),
std::string("reduce_max_test_2"),
@@ -474,6 +484,7 @@ static const std::vector<std::string> models{
// std::string("set_value_dynamic1"),
std::string("set_value_dynamic2"),
std::string("shape"),
std::string("share_data_test_0"),
std::string("sigmoid"),
std::string("silu_static_test1"),
std::string("silu_static_test2"),

View File

@@ -121,6 +121,28 @@ def elementwise_mul(name : str, x, y, axis, in_dtype):
    return outs[0]

+def elementwise_mul_bool(name : str, x, y, in_dtype='bool'):
+    import paddle
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
+        node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
+        mul = node_x * node_y
+        out = paddle.cast(mul, 'float32')
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x, 'y': y},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]

def elementwise_min(name : str, x, y, axis, in_dtype):
    import paddle
@@ -272,6 +294,11 @@ def main():
        elementwise_floordiv("elementwise_floordiv_" + dtype + "_3",
                             data_x.astype(dtype), data_y.astype(dtype), axis, dtype)

+    # test for elementwise_mul with bool data type
+    sample_arr = [True, False]
+    data_x = np.random.choice(sample_arr, size=(2,3,4))
+    data_y = np.random.choice(sample_arr, size=(1,3,4))
+    elementwise_mul_bool("elementwise_mul_bool1", data_x, data_y)

if __name__ == "__main__":
    main()

View File

@@ -15,7 +15,7 @@ def fill_any_like(name:str, x, value, dtype=None):
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
-        data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
+        data = paddle.static.data(name='x', shape=x.shape, dtype = x.dtype)
        out = paddle.full_like(data, value, dtype=dtype)
        out = paddle.cast(out, np.float32)
@@ -39,8 +39,14 @@ def main():
    fill_any_like("fill_any_like_f16", x, 1.0, dtype='float16')
    fill_any_like("fill_any_like_f32", x, 1.2, dtype='float32')
    fill_any_like("fill_any_like_f64", x, 1.2, dtype='float64')
+    fill_any_like("fill_any_like_i16", x, 3, dtype='int16')
    fill_any_like("fill_any_like_i32", x, 2, dtype='int32')
    fill_any_like("fill_any_like_i64", x, 10, dtype='int64')
+    fill_any_like("fill_any_like_bool", x, True, dtype='bool')
+    sample_arr = [True, False]
+    x = np.random.choice(sample_arr, size=(13,17,11))
+    fill_any_like("fill_any_like_bool_2", x, False, dtype=None)

if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,48 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_all paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_all(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        reduced = paddle.fluid.layers.reduce_all(data_x, dim=axis, keep_dim=keepdim)
        out = paddle.cast(reduced, 'int32')
        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    sample_arr = [True, False]
    data = np.random.choice(sample_arr, size=(3,4,5))

    reduce_all("reduce_all_test_0", data)
    reduce_all("reduce_all_test_1", data, axis=0, keepdim=False)
    reduce_all("reduce_all_test_2", data, axis=-1, keepdim=False)
    reduce_all("reduce_all_test_3", data, axis=1, keepdim=True)
    reduce_all("reduce_all_test_4", data, axis=[1,2], keepdim=False)
    reduce_all("reduce_all_test_5", data, axis=[0,1], keepdim=True)


if __name__ == "__main__":
    main()
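As a sanity check on the six cases above, the output shapes follow NumPy's np.all with matching axis/keepdims arguments (illustrative, not part of the generator):

import numpy as np

data = np.random.choice([True, False], size=(3, 4, 5))
assert np.all(data, axis=0).shape == (4, 5)                          # test_1
assert np.all(data, axis=1, keepdims=True).shape == (3, 1, 5)        # test_3
assert np.all(data, axis=(1, 2)).shape == (3,)                       # test_4
assert np.all(data, axis=(0, 1), keepdims=True).shape == (1, 1, 5)   # test_5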

View File

@@ -0,0 +1,41 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# share_data paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def share_data(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = data_x.detach()
        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.random.rand(3,4,5).astype("float32")
    share_data("share_data_test_0", data)


if __name__ == "__main__":
    main()