Add paddle logical and reduce ops support. (#10352)

Author: wood-ghost
Date: 2022-02-15 16:23:50 +08:00
Committed by: GitHub
Commit: ded2d00711 (parent: 39c90e9d48)
20 changed files with 627 additions and 15 deletions

View File

@@ -155,7 +155,10 @@ static const std::vector<std::string> models{std::string("argmax"),
std::string("linear_upsample_scales2"),
std::string("linear_upsample_true_0"),
std::string("log"),
std::string("logical_and"),
std::string("logical_not"),
std::string("logical_or"),
std::string("logical_xor"),
std::string("matmul_xt"),
std::string("matmul_xt_yt"),
std::string("matmul_yt"),
@@ -198,6 +201,36 @@ static const std::vector<std::string> models{std::string("argmax"),
std::string("range0"),
std::string("range1"),
std::string("range2"),
std::string("reduce_max_test_0"),
std::string("reduce_max_test_1"),
std::string("reduce_max_test_2"),
std::string("reduce_max_test_3"),
std::string("reduce_max_test_4"),
std::string("reduce_max_test_5"),
std::string("reduce_mean_test_0"),
std::string("reduce_mean_test_1"),
std::string("reduce_mean_test_2"),
std::string("reduce_mean_test_3"),
std::string("reduce_mean_test_4"),
std::string("reduce_mean_test_5"),
std::string("reduce_min_test_0"),
std::string("reduce_min_test_1"),
std::string("reduce_min_test_2"),
std::string("reduce_min_test_3"),
std::string("reduce_min_test_4"),
std::string("reduce_min_test_5"),
std::string("reduce_prod_test_0"),
std::string("reduce_prod_test_1"),
std::string("reduce_prod_test_2"),
std::string("reduce_prod_test_3"),
std::string("reduce_prod_test_4"),
std::string("reduce_prod_test_5"),
std::string("reduce_sum_test_0"),
std::string("reduce_sum_test_1"),
std::string("reduce_sum_test_2"),
std::string("reduce_sum_test_3"),
std::string("reduce_sum_test_4"),
std::string("reduce_sum_test_5"),
std::string("relu"),
std::string("relu6"),
std::string("relu6_1"),

View File

@@ -0,0 +1,50 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# logical_and paddle model generator
#
import numpy as np
from save_model import saveModel
import sys


def equal_logical_and(name : str, x, y, z):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
        node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
        node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32')
        bool_x = paddle.equal(node_x, node_y)
        bool_y = paddle.equal(node_x, node_z)
        out = paddle.logical_and(bool_x, bool_y)
        out = paddle.cast(out, x.dtype)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x, 'y': y, 'z': z},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out],
                  inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
    data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
    data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32)
    equal_logical_and("logical_and", data_x, data_y, data_z)


if __name__ == "__main__":
    main()
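As a sanity check on what the saved "logical_and" model computes, the graph above can be mirrored with plain NumPy (a minimal sketch; reference_logical_and is an illustrative helper that is not part of this commit, and it assumes paddle.equal/paddle.logical_and behave like their NumPy counterparts elementwise):

import numpy as np

def reference_logical_and(x, y, z):
    # Mirror the Paddle graph: equal(x, y) AND equal(x, z), cast back to the input dtype.
    return np.logical_and(np.equal(x, y), np.equal(x, z)).astype(x.dtype)

data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32)
print(reference_logical_and(data_x, data_y, data_z))  # only the element at [0, 0, 0, 1] is 1.0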

View File

@@ -2,7 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
#
-# pool2d paddle model generator
+# logical_not paddle model generator
#
import numpy as np
from save_model import saveModel
@@ -13,24 +13,25 @@ def equal_logical_not(name : str, x, y):
    import paddle
    paddle.enable_static()

-    node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
-    node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
+        node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')

-    out = paddle.equal(node_x, node_y)
-    out = paddle.logical_not(out)
-    out = paddle.cast(out, np.float32)
+        out = paddle.equal(node_x, node_y)
+        out = paddle.logical_not(out)
+        out = paddle.cast(out, np.float32)

-    cpu = paddle.static.cpu_places(1)
-    exe = paddle.static.Executor(cpu[0])
-    # startup program will call initializer to initialize the parameters.
-    exe.run(paddle.static.default_startup_program())
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())

-    outs = exe.run(
-        feed={'x': x, 'y': y},
-        fetch_list=[out])
+        outs = exe.run(
+            feed={'x': x, 'y': y},
+            fetch_list=[out])

-    saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out],
-              inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1])
+        saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out],
+                  inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]

View File

@@ -0,0 +1,52 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# logical_or paddle model generator
#
import numpy as np
from save_model import saveModel
import sys


def equal_logical_or(name : str, x, y, z):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
        node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
        node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32')
        bool_x = paddle.equal(node_x, node_y)
        bool_y = paddle.equal(node_x, node_z)
        out = paddle.logical_or(bool_x, bool_y)
        out = paddle.cast(out, x.dtype)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x, 'y': y, 'z': z},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out],
                  inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
    data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
    data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32)
    equal_logical_or("logical_or", data_x, data_y, data_z)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,52 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# logical_xor paddle model generator
#
import numpy as np
from save_model import saveModel
import sys


def equal_logical_xor(name : str, x, y, z):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
        node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
        node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32')
        bool_x = paddle.equal(node_x, node_y)
        bool_y = paddle.equal(node_x, node_z)
        out = paddle.logical_xor(bool_x, bool_y)
        out = paddle.cast(out, x.dtype)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x, 'y': y, 'z': z},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x', 'y', 'z'], fetchlist=[out],
                  inputs=[x, y, z], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
    data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
    data_z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]]).astype(np.float32)
    equal_logical_xor("logical_xor", data_x, data_y, data_z)


if __name__ == "__main__":
    main()
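Note that the logical_or and logical_xor generators above differ from the logical_and one only in the combining op (both originally reused paddle.logical_and, which is corrected here to match each file's stated purpose). A hedged NumPy check of the expected outputs on the shared inputs (reference-only code, not part of the commit; it assumes the Paddle logical ops match NumPy's elementwise semantics):

import numpy as np

x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]], dtype=np.float32)
y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]], dtype=np.float32)
z = np.array([[[[1, 0, 5]], [[2, 1, 0]]]], dtype=np.float32)
a, b = np.equal(x, y), np.equal(x, z)
print(np.logical_or(a, b).astype(np.float32).ravel())   # [0. 1. 0. 1. 0. 1.]
print(np.logical_xor(a, b).astype(np.float32).ravel())  # [0. 0. 0. 1. 0. 1.]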

View File

@@ -0,0 +1,46 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_max paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_max(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.max(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32)
    reduce_max("reduce_max_test_0", data)
    reduce_max("reduce_max_test_1", data, axis=0, keepdim=False)
    reduce_max("reduce_max_test_2", data, axis=-1, keepdim=False)
    reduce_max("reduce_max_test_3", data, axis=1, keepdim=True)
    reduce_max("reduce_max_test_4", data, axis=[1, 2], keepdim=False)
    reduce_max("reduce_max_test_5", data, axis=[0, 1], keepdim=True)


if __name__ == "__main__":
    main()
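For reference, the six reduce_max cases above map to the following NumPy calls (a hedged sketch, not part of the commit; it assumes paddle.max(x, axis, keepdim) matches np.max(x, axis=axis, keepdims=keepdim)):

import numpy as np

data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], dtype=np.float32)
print(np.max(data))                              # test_0: reduce over all axes -> 8.0
print(np.max(data, axis=0))                      # test_1: result shape (2, 2)
print(np.max(data, axis=-1))                     # test_2: result shape (2, 2)
print(np.max(data, axis=1, keepdims=True))       # test_3: result shape (2, 1, 2)
print(np.max(data, axis=(1, 2)))                 # test_4: result shape (2,)
print(np.max(data, axis=(0, 1), keepdims=True))  # test_5: result shape (1, 1, 2)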

View File

@@ -0,0 +1,46 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_mean paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_mean(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.mean(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32)
    reduce_mean("reduce_mean_test_0", data)
    reduce_mean("reduce_mean_test_1", data, axis=0, keepdim=False)
    reduce_mean("reduce_mean_test_2", data, axis=-1, keepdim=False)
    reduce_mean("reduce_mean_test_3", data, axis=1, keepdim=True)
    reduce_mean("reduce_mean_test_4", data, axis=[1, 2], keepdim=False)
    reduce_mean("reduce_mean_test_5", data, axis=[0, 1], keepdim=True)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,46 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_min paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_min(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.min(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32)
    reduce_min("reduce_min_test_0", data)
    reduce_min("reduce_min_test_1", data, axis=0, keepdim=False)
    reduce_min("reduce_min_test_2", data, axis=-1, keepdim=False)
    reduce_min("reduce_min_test_3", data, axis=1, keepdim=True)
    reduce_min("reduce_min_test_4", data, axis=[1, 2], keepdim=False)
    reduce_min("reduce_min_test_5", data, axis=[0, 1], keepdim=True)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,46 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_prod paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_prod(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.prod(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32)
    reduce_prod("reduce_prod_test_0", data)
    reduce_prod("reduce_prod_test_1", data, axis=0, keepdim=False)
    reduce_prod("reduce_prod_test_2", data, axis=-1, keepdim=False)
    reduce_prod("reduce_prod_test_3", data, axis=1, keepdim=True)
    reduce_prod("reduce_prod_test_4", data, axis=[1, 2], keepdim=False)
    reduce_prod("reduce_prod_test_5", data, axis=[0, 1], keepdim=True)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,46 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# reduce_sum paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def reduce_sum(name : str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.sum(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32)
    reduce_sum("reduce_sum_test_0", data)
    reduce_sum("reduce_sum_test_1", data, axis=0, keepdim=False)
    reduce_sum("reduce_sum_test_2", data, axis=-1, keepdim=False)
    reduce_sum("reduce_sum_test_3", data, axis=1, keepdim=True)
    reduce_sum("reduce_sum_test_4", data, axis=[1, 2], keepdim=False)
    reduce_sum("reduce_sum_test_5", data, axis=[0, 1], keepdim=True)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,20 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs logical_and(const NodeContext& node) {
auto x = node.get_input("X");
auto y = node.get_input("Y");
return node.default_single_output_mapping({std::make_shared<default_opset::LogicalAnd>(x, y)}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,20 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs logical_or(const NodeContext& node) {
auto x = node.get_input("X");
auto y = node.get_input("Y");
return node.default_single_output_mapping({std::make_shared<default_opset::LogicalOr>(x, y)}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,20 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs logical_xor(const NodeContext& node) {
auto x = node.get_input("X");
auto y = node.get_input("Y");
return node.default_single_output_mapping({std::make_shared<default_opset::LogicalXor>(x, y)}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_max(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceMax>(node_context);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_mean(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceMean>(node_context);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_min(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceMin>(node_context);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,33 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
template <typename T>
NamedOutputs reduce_ops(const NodeContext& node) {
auto x = node.get_input("X");
auto keep_dim = node.get_attribute<bool>("keep_dim");
auto reduce_all = node.get_attribute<bool>("reduce_all", false);
PADDLE_OP_CHECK(node, x.get_partial_shape().rank().is_static(), "reduce_ops: X rank must be static!");
int64_t input_rank = x.get_partial_shape().rank().get_length();
std::vector<int32_t> dims(input_rank);
if (reduce_all) {
std::iota(dims.begin(), dims.end(), 0);
} else {
dims = node.get_attribute<std::vector<int32_t>>("dim");
}
auto axesNode = default_opset::Constant::create(ngraph::element::i32, {dims.size()}, dims);
return node.default_single_output_mapping({std::make_shared<T>(x, axesNode, keep_dim)}, {"Out"});
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
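The axis handling above reduces over every dimension when reduce_all is set and otherwise passes the dim attribute through unchanged. A hedged Python sketch of that selection logic (resolve_reduce_axes is an illustrative name, not part of the commit):

def resolve_reduce_axes(input_rank, reduce_all, dim):
    # Mirrors reduce_ops.hpp: all axes when reduce_all is true, otherwise the 'dim' attribute.
    return list(range(input_rank)) if reduce_all else list(dim)

assert resolve_reduce_axes(3, True, [1]) == [0, 1, 2]
assert resolve_reduce_axes(3, False, [0, -1]) == [0, -1]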

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_prod(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceProd>(node_context);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "reduce_ops.hpp"
namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs reduce_sum(const NodeContext& node_context) {
    return reduce_ops<default_opset::ReduceSum>(node_context);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov

View File

@@ -44,7 +44,10 @@ OP_CONVERTER(layer_norm);
OP_CONVERTER(leaky_relu);
OP_CONVERTER(linear_interp_v2);
OP_CONVERTER(log);
OP_CONVERTER(logical_and);
OP_CONVERTER(logical_not);
OP_CONVERTER(logical_or);
OP_CONVERTER(logical_xor);
OP_CONVERTER(matmul);
OP_CONVERTER(matmul_v2);
OP_CONVERTER(matrix_nms);
@@ -55,6 +58,11 @@ OP_CONVERTER(pow);
OP_CONVERTER(pool2d);
OP_CONVERTER(prior_box);
OP_CONVERTER(range);
OP_CONVERTER(reduce_max);
OP_CONVERTER(reduce_mean);
OP_CONVERTER(reduce_min);
OP_CONVERTER(reduce_prod);
OP_CONVERTER(reduce_sum);
OP_CONVERTER(relu);
OP_CONVERTER(relu6);
OP_CONVERTER(reshape2);
@@ -115,7 +123,10 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"leaky_relu", op::leaky_relu},
{"linear_interp_v2", op::linear_interp_v2},
{"log", op::log},
{"logical_and", op::logical_and},
{"logical_not", op::logical_not},
{"logical_or", op::logical_or},
{"logical_xor", op::logical_xor},
{"lookup_table_v2", op::embedding},
{"matmul", op::matmul},
{"matmul_v2", op::matmul_v2},
@@ -129,6 +140,11 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"pool2d", op::pool2d},
{"prior_box", op::prior_box},
{"range", op::range},
{"reduce_max", op::reduce_max},
{"reduce_mean", op::reduce_mean},
{"reduce_min", op::reduce_min},
{"reduce_prod", op::reduce_prod},
{"reduce_sum", op::reduce_sum},
{"relu", op::relu},
{"relu6", op::relu6},
{"reshape2", op::reshape2},