Removed And operation (#1183)

This commit is contained in:
Ilya Churaev
2020-07-07 13:32:35 +03:00
committed by GitHub
parent 476dc0f00f
commit 59579eb437
19 changed files with 172 additions and 340 deletions

View File

@@ -93,30 +93,3 @@ bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs, const HostTen
{
return evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}
// Out-of-line definition for the ODR-used static constexpr type_info member.
constexpr NodeTypeInfo op::v0::And::type_info;
// Constructs an elementwise logical-and node over two inputs; shape and
// element-type inference is performed by the base-class helper call below.
op::v0::And::And(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseLogical(arg0, arg1, auto_broadcast)
{
constructor_validate_and_infer_types();
}
// Delegates attribute visitation (the auto-broadcast spec) to the base class;
// this op introduces no attributes of its own. Always reports success.
bool op::v0::And::visit_attributes(AttributeVisitor& visitor)
{
BinaryElementwiseLogical::visit_attributes(visitor);
return true;
}
// Clones the node onto new inputs, carrying over this node's broadcast
// specification; validates the argument count first.
shared_ptr<Node> op::v0::And::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::And>(new_args.at(0), new_args.at(1), this->get_autob());
}
// Evaluation hook (used e.g. by constant folding): computes the elementwise
// AND of inputs[0] and inputs[1] into outputs[0] under this node's broadcast
// rules, via the file-local evaluate_logand dispatcher.
bool op::v0::And::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}

View File

@@ -58,42 +58,6 @@ namespace ngraph
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
};
} // namespace v0
namespace v0
{
/// \brief Elementwise logical-and operation.
///
/// Produces the boolean AND of two input tensors, with optional
/// auto-broadcasting between their shapes.
class NGRAPH_API And : public util::BinaryElementwiseLogical
{
public:
static constexpr NodeTypeInfo type_info{"And", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a logical-and operation.
And() = default;
/// \brief Constructs a logical-and operation.
///
/// \param arg0 Output that produces the first input tensor.<br>
/// `[d0, ...]`
/// \param arg1 Output that produces the second input tensor.<br>
/// `[d0, ...]`
/// \param auto_broadcast Auto broadcast specification
///
/// Output `[d0, ...]`
///
And(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec());
/// \brief Makes a copy of this node wired to new inputs.
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
/// AND is commutative, so graph passes may freely swap the inputs.
virtual bool is_commutative() const override { return true; }
/// \brief Evaluation hook used by constant folding (see the .cpp file).
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
};
}
using v0::And;
} // namespace v1
}
}

View File

@@ -33,7 +33,6 @@ NGRAPH_OP(Add, ngraph::op::v0, 0)
NGRAPH_OP(Add, ngraph::op::v1, 1)
NGRAPH_OP(All, ngraph::op::v0, 0)
NGRAPH_OP(AllReduce, ngraph::op::v0, 0)
NGRAPH_OP(And, ngraph::op::v0, 0)
NGRAPH_OP(Any, ngraph::op::v0, 0)
NGRAPH_OP(Asin, ngraph::op::v0, 0)
NGRAPH_OP(Asinh, ngraph::op::v3, 3)

View File

@@ -969,12 +969,6 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
node = make_shared<op::AllReduce>(args[0]);
break;
}
case OP_TYPEID::And:
{
node = make_shared<op::And>(
args[0], args[1], read_auto_broadcast(node_js, "auto_broadcast"));
break;
}
case OP_TYPEID::Any:
{
auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes"));
@@ -2402,15 +2396,6 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::AllReduce: { break;
}
case OP_TYPEID::And:
{
auto tmp = static_cast<const op::And*>(&n);
if (tmp->get_autob().m_type != op::AutoBroadcastType::NONE)
{
node["auto_broadcast"] = write_auto_broadcast(tmp->get_autob());
}
break;
}
case OP_TYPEID::Any:
{
auto tmp = static_cast<const op::Any*>(&n);

View File

@@ -81,7 +81,6 @@ set(SRC
opset_pass/binary_elementwise_opset_pass.cpp
opset_pass/broadcast_opset_pass.cpp
opset_pass/convolution_opset_pass.cpp
opset_pass/logical_and_opset_pass.cpp
opset_pass/logical_not_opset_pass.cpp
opset_pass/logical_or_opset_pass.cpp
opset_pass/logical_xor_opset_pass.cpp
@@ -321,7 +320,6 @@ set(MULTI_TEST_SRC
backend/group_convolution.in.cpp
backend/layer_norm.in.cpp
backend/log.in.cpp
backend/logical_and.in.cpp
backend/logical_or.in.cpp
backend/logical_xor.in.cpp
backend/lrn.in.cpp

View File

@@ -104,74 +104,6 @@ void check_auto_bcast(
}
}
// Exercises NumPy-style auto-broadcasting for each binary elementwise op:
// a 6-element tensor combined with a 3-element tensor, with precomputed
// expected results. The final `true` on Power requests approximate (float)
// comparison in check_auto_bcast.
NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise)
{
// Arithmetic ops (float in, float out).
check_auto_bcast<op::Add, float, float>({{1, 2, 3, 4, 5, 6}, {5, 6, 7}}, {6, 8, 10, 9, 11, 13});
check_auto_bcast<op::Subtract, float, float>({{1, 2, 3, 4, 5, 6}, {5, 6, 7}},
{-4.f, -4.f, -4.f, -1.f, -1.f, -1.f});
check_auto_bcast<op::Multiply, float, float>({{1, 2, 3, 4, 5, 6}, {5, 6, 7}},
{5, 12, 21, 20, 30, 42});
check_auto_bcast<op::Divide, float, float>({{4, 5, 6, 7, 8, 9}, {1, 2, 3}},
{4, 2.5f, 2, 7, 4, 3});
check_auto_bcast<op::Maximum, float, float>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}},
{1, 5, 8, 4, 5, 8});
check_auto_bcast<op::Minimum, float, float>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}},
{1, 2, 3, 1, 5, 6});
check_auto_bcast<op::Power, float, float>({{1, 2, 3, 4, 5, 6}, {1, 2, 3}},
{1, 4, 27, 4, 25, 216},
op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY),
true);
// Logical ops (char-typed boolean tensors in and out).
check_auto_bcast<op::And, char, char>({{1, 0, 1, 0, 0, 1}, {1, 0, 1}}, {1, 0, 1, 0, 0, 1});
check_auto_bcast<op::Or, char, char>({{1, 0, 1, 0, 1, 1}, {1, 0, 0}}, {1, 0, 1, 1, 1, 1});
// Comparison ops (numeric in, boolean char out).
check_auto_bcast<op::Equal, uint8_t, char>({{1, 0, 1, 0, 1, 1}, {1, 0, 0}}, {1, 1, 0, 0, 0, 0});
check_auto_bcast<op::Greater, float, char>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {0, 0, 0, 1, 0, 0});
check_auto_bcast<op::GreaterEq, float, char>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}},
{1, 0, 0, 1, 1, 0});
check_auto_bcast<op::Less, uint8_t, char>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {0, 1, 1, 0, 0, 1});
check_auto_bcast<op::LessEq, uint8_t, char>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}},
{1, 1, 1, 0, 1, 1});
check_auto_bcast<op::NotEqual, uint8_t, char>({{1, 2, 3, 4, 5, 6}, {1, 5, 8}},
{0, 1, 1, 1, 0, 1});
}
// Same op coverage as the test above, but with PaddlePaddle (PDPD) style
// auto-broadcasting at axis 1 instead of NumPy-style; every call passes the
// shared `autob` spec explicitly.
NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd)
{
const op::AutoBroadcastSpec& autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
// Arithmetic ops.
check_auto_bcast<op::Add, float, float>(
{{1, 2, 3, 4, 5, 6}, {5, 6, 7}}, {6, 8, 10, 9, 11, 13}, autob);
check_auto_bcast<op::Subtract, float, float>(
{{1, 2, 3, 4, 5, 6}, {5, 6, 7}}, {-4.f, -4.f, -4.f, -1.f, -1.f, -1.f}, autob);
check_auto_bcast<op::Multiply, float, float>(
{{1, 2, 3, 4, 5, 6}, {5, 6, 7}}, {5, 12, 21, 20, 30, 42}, autob);
check_auto_bcast<op::Divide, float, float>(
{{4, 5, 6, 7, 8, 9}, {1, 2, 3}}, {4, 2.5f, 2, 7, 4, 3}, autob);
check_auto_bcast<op::Maximum, float, float>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {1, 5, 8, 4, 5, 8}, autob);
check_auto_bcast<op::Minimum, float, float>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {1, 2, 3, 1, 5, 6}, autob);
// Final `true` requests approximate (float) result comparison.
check_auto_bcast<op::Power, float, float>(
{{1, 2, 3, 4, 5, 6}, {1, 2, 3}}, {1, 4, 27, 4, 25, 216}, autob, true);
// Logical ops.
check_auto_bcast<op::And, char, char>(
{{1, 0, 1, 0, 0, 1}, {1, 0, 1}}, {1, 0, 1, 0, 0, 1}, autob);
check_auto_bcast<op::Or, char, char>(
{{1, 0, 1, 0, 1, 1}, {1, 0, 0}}, {1, 0, 1, 1, 1, 1}, autob);
// Comparison ops (boolean char outputs).
check_auto_bcast<op::Equal, uint8_t, char>(
{{1, 0, 1, 0, 1, 1}, {1, 0, 0}}, {1, 1, 0, 0, 0, 0}, autob);
check_auto_bcast<op::Greater, float, char>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {0, 0, 0, 1, 0, 0}, autob);
check_auto_bcast<op::GreaterEq, float, char>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {1, 0, 0, 1, 1, 0}, autob);
check_auto_bcast<op::Less, uint8_t, char>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {0, 1, 1, 0, 0, 1}, autob);
check_auto_bcast<op::LessEq, uint8_t, char>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {1, 1, 1, 0, 1, 1}, autob);
check_auto_bcast<op::NotEqual, uint8_t, char>(
{{1, 2, 3, 4, 5, 6}, {1, 5, 8}}, {0, 1, 1, 1, 0, 1}, autob);
}
NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic)
{
auto pshape_a = PartialShape::dynamic();

View File

@@ -1,52 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/known_element_types.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// End-to-end backend test: builds a Function computing And(A, B) on boolean
// [2,2,2] tensors, compiles and runs it on ${BACKEND_NAME}, and checks the
// elementwise result against the AND of the two input vectors.
NGRAPH_TEST(${BACKEND_NAME}, logical_and)
{
Shape shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::boolean, shape);
auto B = make_shared<op::Parameter>(element::boolean, shape);
auto f = make_shared<Function>(make_shared<op::And>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::boolean, shape);
copy_data(a, vector<char>{1, 0, 1, 1, 1, 0, 1, 0});
auto b = backend->create_tensor(element::boolean, shape);
copy_data(b, vector<char>{0, 0, 1, 0, 0, 1, 1, 0});
auto result = backend->create_tensor(element::boolean, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
// Expected output is the elementwise AND of vectors `a` and `b` above.
EXPECT_EQ((vector<char>{0, 0, 1, 0, 0, 0, 1, 0}), read_vector<char>(result));
}

View File

@@ -413,7 +413,6 @@ TEST(constant_folding, constant_unary_binary)
auto greater_eq_autob_numpy = make_shared<op::GreaterEq>(a, g, op::AutoBroadcastType::NUMPY);
auto less_autob_numpy = make_shared<op::Less>(a, g, op::AutoBroadcastType::NUMPY);
auto less_eq_autob_numpy = make_shared<op::LessEq>(a, g, op::AutoBroadcastType::NUMPY);
auto logical_and_autob_numpy = make_shared<op::And>(h, i, op::AutoBroadcastType::NUMPY);
auto logical_or_autob_numpy = make_shared<op::Or>(h, i, op::AutoBroadcastType::NUMPY);
auto logical_xor_autob_numpy = make_shared<op::Xor>(h, i, op::AutoBroadcastType::NUMPY);
@@ -442,7 +441,6 @@ TEST(constant_folding, constant_unary_binary)
greater_eq_autob_numpy,
less_autob_numpy,
less_eq_autob_numpy,
logical_and_autob_numpy,
logical_or_autob_numpy,
logical_xor_autob_numpy},
ParameterVector{});
@@ -475,7 +473,6 @@ TEST(constant_folding, constant_unary_binary)
vector<char> greater_eq_autob_numpy_expected{1, 0, 1, 1};
vector<char> less_autob_numpy_expected{0, 1, 0, 0};
vector<char> less_eq_autob_numpy_expected{1, 1, 0, 1};
vector<char> logical_and_autob_numpy_expected{0, 0, 0, 1};
vector<char> logical_or_autob_numpy_expected{0, 1, 1, 1};
vector<char> logical_xor_autob_numpy_expected{0, 1, 1, 0};
@@ -502,9 +499,8 @@ TEST(constant_folding, constant_unary_binary)
ASSERT_EQ(get_result_constant<char>(func, 20), greater_eq_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 21), less_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 22), less_eq_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 23), logical_and_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 24), logical_or_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 25), logical_xor_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 23), logical_or_autob_numpy_expected);
ASSERT_EQ(get_result_constant<char>(func, 24), logical_xor_autob_numpy_expected);
ASSERT_NO_THROW(pass_manager.run_passes(func_error));
}
@@ -1626,31 +1622,6 @@ TEST(constant_folding, const_less_eq)
ASSERT_EQ(values_expected, values_out);
}
// Verifies that the ConstantFolding pass replaces And(const, const) with a
// single Constant node holding the elementwise AND of the two inputs.
TEST(constant_folding, const_and)
{
auto constant0 =
op::Constant::create(element::boolean, Shape{2, 3}, vector<int32_t>{0, 0, 1, 0, 1, 1});
auto constant1 =
op::Constant::create(element::boolean, Shape{2, 3}, vector<int32_t>{0, 1, 1, 1, 0, 1});
auto eq = make_shared<op::And>(constant0, constant1);
auto f = make_shared<Function>(eq, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
// After folding: no And nodes remain and exactly one Constant is left.
ASSERT_EQ(count_ops_of_type<op::And>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_const);
auto values_out = new_const->get_vector<char>();
// Elementwise AND of the two constant vectors above.
vector<char> values_expected{0, 0, 1, 0, 0, 1};
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_or)
{
auto constant0 =

View File

@@ -18,6 +18,7 @@
#include "ngraph/ngraph.hpp"
#include "ngraph/validation_util.hpp"
#include "op/and.hpp"
#include "op/atan2.hpp"
#include "util/test_tools.hpp"
@@ -72,7 +73,7 @@ namespace
void op_is_And()
{
op::And node;
op::v0::And node;
EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
EXPECT_FALSE(node.is_binary_elementwise_comparison());

View File

@@ -1,67 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "opset0_downgrade.hpp"
#include "opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
// Verifies that the Opset1Upgrade pass rewrites op::v0::And into
// op::v1::LogicalAnd while preserving the boolean output element type.
TEST(opset_transform, opset1_logical_and_upgrade_pass)
{
const auto a = make_shared<op::Parameter>(element::boolean, Shape{5, 10, 15});
const auto b = make_shared<op::Parameter>(element::boolean, Shape{5, 10, 15});
const auto and_v0 = make_shared<op::v0::And>(a, b);
const auto result = make_shared<op::Result>(and_v0);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{a, b});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset1Upgrade>();
pass_manager.run_passes(f);
// The node feeding the result must now be a v1::LogicalAnd.
const auto pass_replacement_node = f->get_result()->get_input_node_shared_ptr(0);
const auto and_v1 = as_type_ptr<op::v1::LogicalAnd>(pass_replacement_node);
ASSERT_TRUE(and_v1);
const auto values_out_element_type = and_v1->get_output_element_type(0);
EXPECT_EQ(values_out_element_type, element::boolean);
}
// Verifies the inverse direction: Opset0Downgrade rewrites op::v1::LogicalAnd
// back into op::v0::And, again preserving the boolean output element type.
TEST(opset_transform, opset1_logical_and_downgrade_pass)
{
const auto a = make_shared<op::Parameter>(element::boolean, Shape{5, 10, 15});
const auto b = make_shared<op::Parameter>(element::boolean, Shape{5, 10, 15});
const auto and_v1 = make_shared<op::v1::LogicalAnd>(a, b);
const auto result = make_shared<op::Result>(and_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{a, b});
ngraph::pass::Manager pass_manager;
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(f);
// The node feeding the result must now be a v0::And.
const auto pass_replacement_node = f->get_result()->get_input_node_shared_ptr(0);
const auto and_v0 = as_type_ptr<op::v0::And>(pass_replacement_node);
ASSERT_TRUE(and_v0);
const auto values_out_element_type = and_v0->get_output_element_type(0);
EXPECT_EQ(values_out_element_type, element::boolean);
}

View File

@@ -31,6 +31,8 @@ set (SRC
performance_counter.hpp
dynamic/dynamic_backend.cpp
dynamic/dynamic_backend.hpp
op/and.cpp
op/and.hpp
op/atan2.cpp
op/atan2.hpp
op/avg_pool.cpp

View File

@@ -29,6 +29,7 @@
#include "ngraph/pass/manager.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "op/and.hpp"
#include "op/atan2.hpp"
#include "opset0_downgrade.hpp"
#include "opset1_downgrade.hpp"

View File

@@ -0,0 +1,95 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "and.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/and.hpp"
using namespace std;
using namespace ngraph;
// File-local evaluation helpers for op::v0::And.
namespace
{
// Computes the elementwise logical AND of arg0 and arg1 into out for
// element type ET, applying the given auto-broadcast specification via the
// reference implementation. Always reports success.
template <element::Type_t ET>
bool evaluate(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec)
{
runtime::reference::logical_and(arg0->get_data_ptr<ET>(),
arg1->get_data_ptr<ET>(),
out->get_data_ptr<ET>(),
arg0->get_shape(),
arg1->get_shape(),
broadcast_spec);
return true;
}
// Sets the output's broadcast shape, then dispatches on arg0's element type
// to the typed evaluate<> above (via the TYPE_CASE macro). Returns false for
// element types without a supported case.
bool evaluate_logand(const HostTensorPtr& arg0,
const HostTensorPtr& arg1,
const HostTensorPtr& out,
const op::AutoBroadcastSpec& broadcast_spec)
{
bool rc = true;
out->set_broadcast(broadcast_spec, arg0, arg1);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
break;
TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
break;
default: rc = false; break;
}
return rc;
}
}
// Out-of-line definition for the ODR-used static constexpr type_info member.
constexpr NodeTypeInfo op::v0::And::type_info;
// Constructs an elementwise logical-and node over two inputs; shape and
// element-type inference is performed by the base-class helper call below.
op::v0::And::And(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast)
: BinaryElementwiseLogical(arg0, arg1, auto_broadcast)
{
constructor_validate_and_infer_types();
}
// Delegates attribute visitation (the auto-broadcast spec) to the base class;
// this op introduces no attributes of its own. Always reports success.
bool op::v0::And::visit_attributes(AttributeVisitor& visitor)
{
BinaryElementwiseLogical::visit_attributes(visitor);
return true;
}
// Clones the node onto new inputs, carrying over this node's broadcast
// specification; validates the argument count first.
shared_ptr<Node> op::v0::And::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<v0::And>(new_args.at(0), new_args.at(1), this->get_autob());
}
// Evaluation hook: computes the elementwise AND of inputs[0] and inputs[1]
// into outputs[0] using the file-local evaluate_logand dispatcher above.
bool op::v0::And::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
return evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}

View File

@@ -0,0 +1,63 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "backend_visibility.hpp"
#include "ngraph/op/util/binary_elementwise_logical.hpp"
namespace ngraph
{
namespace op
{
namespace v0
{
/// \brief Elementwise logical-and operation.
///
/// Backend-local copy of the deprecated v0::And op, exported with
/// BACKEND_API so the test backends can keep constructing it.
class BACKEND_API And : public util::BinaryElementwiseLogical
{
public:
static constexpr NodeTypeInfo type_info{"And", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a logical-and operation.
And() = default;
/// \brief Constructs a logical-and operation.
///
/// \param arg0 Output that produces the first input tensor.<br>
/// `[d0, ...]`
/// \param arg1 Output that produces the second input tensor.<br>
/// `[d0, ...]`
/// \param auto_broadcast Auto broadcast specification
///
/// Output `[d0, ...]`
///
And(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec());
/// \brief Makes a copy of this node wired to new inputs.
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
/// AND is commutative, so graph passes may freely swap the inputs.
virtual bool is_commutative() const override { return true; }
/// \brief Evaluation hook used by constant folding (see the .cpp file).
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
};
}
}
}

View File

@@ -17,6 +17,7 @@
#pragma once
#include "ngraph/ops.hpp"
#include "op/and.hpp"
#include "op/atan2.hpp"
#include "op/avg_pool.hpp"

View File

@@ -30,6 +30,7 @@
#include "ngraph/slice_plan.hpp"
#include "ngraph/type.hpp"
#include "ngraph/validation_util.hpp"
#include "op/and.hpp"
#include "op/avg_pool.hpp"
#include "opset0_downgrade.hpp"

View File

@@ -55,7 +55,7 @@ NGRAPH_OP(Acos, ngraph::op)
NGRAPH_OP(Add, ngraph::op)
NGRAPH_OP(All, ngraph::op)
NGRAPH_OP(AllReduce, ngraph::op)
NGRAPH_OP(And, ngraph::op)
NGRAPH_OP(And, ngraph::op::v0)
NGRAPH_OP(Any, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)

View File

@@ -25,6 +25,7 @@
#include "ngraph/graph_util.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
#include "op/and.hpp"
#include "op/atan2.hpp"
#include "op/avg_pool.hpp"
@@ -50,7 +51,7 @@ namespace
return op_cast_binary_elementwise_node<op::v0::Add, op::v1::Add>(node);
}
shared_ptr<Node> op_cast(shared_ptr<op::And> node)
shared_ptr<Node> op_cast(shared_ptr<op::v0::And> node)
{
return op_cast_binary_elementwise_node<op::v0::And, op::v1::LogicalAnd>(node);
}

View File

@@ -193,14 +193,6 @@ void test_binary_logical(std::string /* node_type */,
test_binary_good_arguments(tv0_2_4_param_0, tv0_2_4_param_1);
}
TEST(type_prop, and_bad_arguments)
{
test_binary_logical(
"And", [](const shared_ptr<Node>& x, const shared_ptr<Node>& y) -> shared_ptr<Node> {
return make_shared<op::And>(x, y);
});
}
TEST(type_prop, or_bad_arguments)
{
test_binary_logical(
@@ -235,8 +227,7 @@ void test_binary_eltwise_numpy(const element::Type& et, const op::AutoBroadcastS
TEST(type_prop, eltwise_auto_bcast)
{
test_binary_eltwise_numpy<op::Add>(element::f32, op::AutoBroadcastType::NUMPY);
test_binary_eltwise_numpy<op::And>(element::boolean, op::AutoBroadcastType::NUMPY);
test_binary_eltwise_numpy<op::v1::Add>(element::f32, op::AutoBroadcastType::NUMPY);
test_binary_eltwise_numpy<op::Divide>(element::f32, op::AutoBroadcastType::NUMPY);
test_binary_eltwise_numpy<op::Equal>(element::f32, op::AutoBroadcastType::NUMPY);
test_binary_eltwise_numpy<op::Greater>(element::f32, op::AutoBroadcastType::NUMPY);
@@ -531,12 +522,6 @@ TEST(type_prop, binary_elementwise_arithmetic_right_et_dynamic)
TEST(type_prop, logic_arith_compare_partial_et)
{
auto test_logic = [](element::Type et0, element::Type et1) -> std::shared_ptr<Node> {
auto param0 = std::make_shared<op::Parameter>(et0, Shape{1, 2, 3});
auto param1 = std::make_shared<op::Parameter>(et1, Shape{1, 2, 3});
return std::make_shared<op::And>(param0, param1);
};
auto test_arith = [](element::Type et0, element::Type et1) -> std::shared_ptr<Node> {
auto param0 = std::make_shared<op::Parameter>(et0, Shape{1, 2, 3});
auto param1 = std::make_shared<op::Parameter>(et1, Shape{1, 2, 3});
@@ -554,27 +539,6 @@ TEST(type_prop, logic_arith_compare_partial_et)
return std::make_shared<op::Not>(param);
};
// Logical ops:
//
// int int -> !
// int boo -> !
// int dyn -> !
// boo int -> !
// boo boo -> boo
// boo dyn -> boo
// dyn int -> !
// dyn boo -> boo
// dyn dyn -> boo
ASSERT_ANY_THROW({ test_logic(element::i32, element::i32); });
ASSERT_ANY_THROW({ test_logic(element::i32, element::boolean); });
ASSERT_ANY_THROW({ test_logic(element::i32, element::dynamic); });
ASSERT_ANY_THROW({ test_logic(element::boolean, element::i32); });
ASSERT_EQ(test_logic(element::boolean, element::boolean)->get_element_type(), element::boolean);
ASSERT_EQ(test_logic(element::boolean, element::dynamic)->get_element_type(), element::boolean);
ASSERT_ANY_THROW({ test_logic(element::dynamic, element::i32); });
ASSERT_EQ(test_logic(element::dynamic, element::boolean)->get_element_type(), element::boolean);
ASSERT_EQ(test_logic(element::dynamic, element::dynamic)->get_element_type(), element::boolean);
// Arith ops:
//
// int int -> int