Revise logical and (#6731)

* update docs

* add host tensors validation

* create type_prop tests

* create serialization single layer test

* create visitor test

* create op_reference test

* add logicalAnd to constants.py

* create additional op_reference tests

* add check for number of visited attributes in visitor test

* update auto_broadcast description

* remove backend test

* update LogicalNot params name

* remove backend test from CMakeList

* create util function for type_prop tests

* update op_reference tests

* remove typo in docs

* remove unsupported types from evaluate

* fix bug in op_reference test

* refactor visitor test

* update math formula in the spec

* update has_evaluate types
Piotr Szmelczynski 2021-08-03 18:37:16 +02:00 committed by GitHub
parent 950d7b8597
commit a30bd0c9bb
9 changed files with 273 additions and 61 deletions


@@ -6,39 +6,40 @@
**Short description**: *LogicalAnd* performs element-wise logical AND operation with two given tensors applying multi-directional broadcast rules.
**Detailed description**: Before performing the logical operation, input tensors *a* and *b* are broadcast if their shapes differ and the `auto_broadcast` attribute is not `none`. Broadcasting is performed according to the `auto_broadcast` value.
After broadcasting, *LogicalAnd* performs the following element-wise operation on the input tensors *a* and *b*:
\f[
o_{i} = a_{i} \wedge b_{i}
\f]
**Attributes**:
* *auto_broadcast*
* **Description**: specifies rules used for auto-broadcasting of input tensors.
* **Range of values**:
* *none* - no auto-broadcasting is allowed, all input shapes should match
* *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in <a href="https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md">ONNX docs</a>.
* *none* - no auto-broadcasting is allowed, all input shapes must match,
* *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md),
* *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md).
* **Type**: string
* **Default value**: "numpy"
* **Required**: *no*
**Inputs**
* **1**: A tensor of type *T*. **Required.**
* **2**: A tensor of type *T*. **Required.**
* **1**: A tensor of type *T* and arbitrary shape. **Required.**
* **2**: A tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
* **1**: The result of element-wise logical AND operation. A tensor of type boolean.
* **1**: The result of element-wise *LogicalAnd* operation. A tensor of type boolean.
**Types**
* *T*: boolean type.
**Detailed description**
Before performing the logical operation, input tensors *a* and *b* are broadcast if their shapes differ and the `auto_broadcast` attribute is not `none`. Broadcasting is performed according to the `auto_broadcast` value.
After broadcasting, *LogicalAnd* performs the following element-wise operation on the input tensors *a* and *b*:
\f[
o_{i} = a_{i} and b_{i}
\f]
**Examples**
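
The broadcasting and typing rules above can also be exercised directly through the nGraph API. A minimal sketch, assuming only the standard nGraph opset1 headers (the shapes and variable names are illustrative):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

int main() {
    // Shapes {2, 1} and {1, 3} are numpy-broadcast to the common shape {2, 3}.
    auto a = std::make_shared<op::Parameter>(element::boolean, Shape{2, 1});
    auto b = std::make_shared<op::Parameter>(element::boolean, Shape{1, 3});
    auto logical_and = std::make_shared<op::v1::LogicalAnd>(
        a, b, op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY));
    // The output element type is always boolean; the shape follows the broadcast rules.
    NGRAPH_CHECK(logical_and->get_element_type() == element::boolean);
    NGRAPH_CHECK(logical_and->get_shape() == (Shape{2, 3}));
    return 0;
}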


@@ -0,0 +1,83 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <ie_core.hpp>
#include <ie_ngraph_utils.hpp>
#include <ngraph/ngraph.hpp>
#include <shared_test_classes/base/layer_test_utils.hpp>
#include <tuple>
#include "base_reference_test.hpp"
using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;
struct LogicalAndParams {
template <class IT, class OT>
LogicalAndParams(const ngraph::PartialShape& input_shape1, const ngraph::PartialShape& input_shape2,
const std::vector<IT>& iValues1, const std::vector<IT>& iValues2, const std::vector<OT>& oValues)
: pshape1(input_shape1), pshape2(input_shape2), inType(ngraph::element::boolean), outType(ngraph::element::boolean),
inputData1(CreateBlob(ngraph::element::boolean, iValues1)), inputData2(CreateBlob(ngraph::element::boolean, iValues2)),
refData(CreateBlob(ngraph::element::boolean, oValues)) {}
ngraph::PartialShape pshape1;
ngraph::PartialShape pshape2;
ngraph::element::Type inType;
ngraph::element::Type outType;
InferenceEngine::Blob::Ptr inputData1;
InferenceEngine::Blob::Ptr inputData2;
InferenceEngine::Blob::Ptr refData;
};
class ReferenceLogicalAndLayerTest : public testing::TestWithParam<LogicalAndParams>, public CommonReferenceTest {
public:
void SetUp() override {
auto params = GetParam();
function = CreateFunction(params.pshape1, params.pshape2, params.inType);
inputData = {params.inputData1, params.inputData2};
refOutData = {params.refData};
}
static std::string getTestCaseName(const testing::TestParamInfo<LogicalAndParams>& obj) {
auto param = obj.param;
std::ostringstream result;
result << "input_shape1=" << param.pshape1 << "_";
result << "input_shape2=" << param.pshape2 << "_";
result << "iType=" << param.inType << "_";
result << "oType=" << param.outType;
return result.str();
}
private:
static std::shared_ptr<Function> CreateFunction(const PartialShape& input_shape1,
const PartialShape& input_shape2, const element::Type& input_type) {
const auto in = std::make_shared<op::Parameter>(input_type, input_shape1);
const auto in2 = std::make_shared<op::Parameter>(input_type, input_shape2);
const auto logical_and = std::make_shared<op::v1::LogicalAnd>(in, in2);
return std::make_shared<Function>(NodeVector {logical_and}, ParameterVector {in, in2});
}
};
TEST_P(ReferenceLogicalAndLayerTest, CompareWithHardcodedRefs) {
Exec();
}
INSTANTIATE_TEST_SUITE_P(
smoke_LogicalAnd_With_Hardcoded_Refs, ReferenceLogicalAndLayerTest,
::testing::Values(
LogicalAndParams(ngraph::PartialShape {2, 2}, ngraph::PartialShape {2, 2},
std::vector<char> {true, false, true, false},
std::vector<char> {false, true, true, false},
std::vector<char> {false, false, true, false}),
LogicalAndParams(ngraph::PartialShape {2, 1, 2, 1}, ngraph::PartialShape {1, 1, 2, 1},
std::vector<char> {true, false, true, false},
std::vector<char> {true, false},
std::vector<char> {true, false, true, false}),
LogicalAndParams(ngraph::PartialShape {3, 4}, ngraph::PartialShape {3, 4},
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, true},
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false},
std::vector<char> {true, true, true, true, true, false, true, false, false, true, true, false})),
ReferenceLogicalAndLayerTest::getTestCaseName);
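
Each LogicalAndParams entry is executed by feeding inputData through Exec() and comparing the result against refData element-wise. Extending coverage only takes another entry in the ::testing::Values list; a hypothetical scalar-broadcast case, for illustration:

LogicalAndParams(ngraph::PartialShape {1}, ngraph::PartialShape {2, 2},
                 std::vector<char> {true},
                 std::vector<char> {true, false, true, false},
                 std::vector<char> {true, false, true, false}),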


@@ -0,0 +1,84 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "shared_test_classes/single_layer/logical.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::LogicalParams;
namespace {
TEST_P(LogicalLayerTest, Serialize) {
Serialize();
}
std::map<std::vector<size_t>, std::vector<std::vector<size_t >>> inputShapes = {
{{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}},
{{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}},
{{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}},
{{1, 3, 20}, {{20}, {2, 1, 1}}},
{{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}},
{{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}},
};
std::map<std::vector<size_t>, std::vector<std::vector<size_t >>> inputShapesNot = {
{{1}, {}},
{{5}, {}},
{{2, 200}, {}},
{{1, 3, 20}, {}},
{{2, 17, 3, 4}, {}},
{{2, 1, 1, 3, 1}, {}},
};
std::vector<InferenceEngine::Precision> inputsPrecisions = {
InferenceEngine::Precision::BOOL,
};
std::vector<ngraph::helpers::LogicalTypes> logicalOpTypes = {
ngraph::helpers::LogicalTypes::LOGICAL_AND,
ngraph::helpers::LogicalTypes::LOGICAL_OR,
ngraph::helpers::LogicalTypes::LOGICAL_XOR,
};
std::vector<ngraph::helpers::InputLayerType> secondInputTypes = {
ngraph::helpers::InputLayerType::CONSTANT,
ngraph::helpers::InputLayerType::PARAMETER,
};
std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};
std::map<std::string, std::string> additional_config = {};
const auto LogicalTestParams = ::testing::Combine(
::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapes)),
::testing::ValuesIn(logicalOpTypes),
::testing::ValuesIn(secondInputTypes),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(inputsPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config));
const auto LogicalNotTestParams = ::testing::Combine(
::testing::ValuesIn(LogicalLayerTest::combineShapes(inputShapesNot)),
::testing::Values(ngraph::helpers::LogicalTypes::LOGICAL_NOT),
::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
::testing::ValuesIn(netPrecisions),
::testing::ValuesIn(inputsPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::Values(additional_config));
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, LogicalLayerTest, LogicalTestParams, LogicalLayerTest::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNot, LogicalLayerTest, LogicalNotTestParams, LogicalLayerTest::getTestCaseName);
} // namespace


@@ -57,6 +57,7 @@ VERIFIED_OP_REFERENCES = [
'LRN-1',
'LSTMCell-4',
'LSTMSequence-5',
'LogicalAnd-1',
'LogSoftmax-5',
'Loop-5',
'MVN-1',


@@ -7,6 +7,8 @@
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/and.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
@@ -61,12 +63,6 @@ namespace logand
switch (arg0->get_element_type())
{
NGRAPH_TYPE_CASE(evaluate_logand, boolean, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, i32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, i64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, u32, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, u64, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, f16, arg0, arg1, out, broadcast_spec);
NGRAPH_TYPE_CASE(evaluate_logand, f32, arg0, arg1, out, broadcast_spec);
default: rc = false; break;
}
return rc;
@@ -77,6 +73,7 @@ bool op::v1::LogicalAnd::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_LogicalAnd_evaluate);
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2));
return logand::evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
}
@@ -85,13 +82,7 @@ bool op::v1::LogicalAnd::has_evaluate() const
NGRAPH_OP_SCOPE(v1_LogicalAnd_has_evaluate);
switch (get_input_element_type(0))
{
case ngraph::element::boolean:
case ngraph::element::i32:
case ngraph::element::i64:
case ngraph::element::u32:
case ngraph::element::u64:
case ngraph::element::f16:
case ngraph::element::f32: return true;
case ngraph::element::boolean: return true;
default: break;
}
return false;
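
With this change, evaluate() and constant folding fire only for boolean inputs, matching the *T*: boolean constraint in the spec; the removed integer and floating-point cases were unreachable in practice, since type validation rejects non-boolean operands at node construction (see the type_prop tests below). A sketch of driving evaluate() by hand, assuming the runtime::HostTensor API already included by this file:

#include <cstring>
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/host_tensor.hpp"

using namespace ngraph;

int main() {
    auto a = std::make_shared<op::Parameter>(element::boolean, Shape{4});
    auto b = std::make_shared<op::Parameter>(element::boolean, Shape{4});
    auto node = std::make_shared<op::v1::LogicalAnd>(a, b);
    NGRAPH_CHECK(node->has_evaluate()); // true only because the input type is boolean

    auto lhs = std::make_shared<runtime::HostTensor>(element::boolean, Shape{4});
    auto rhs = std::make_shared<runtime::HostTensor>(element::boolean, Shape{4});
    auto out = std::make_shared<runtime::HostTensor>(element::boolean, Shape{4});
    const char lhs_data[] = {1, 0, 1, 0};
    const char rhs_data[] = {1, 1, 0, 0};
    std::memcpy(lhs->get_data_ptr(), lhs_data, sizeof(lhs_data));
    std::memcpy(rhs->get_data_ptr(), rhs_data, sizeof(rhs_data));
    node->evaluate({out}, {lhs, rhs}); // out now holds {1, 0, 0, 0}
    return 0;
}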


@@ -152,6 +152,7 @@ set(SRC
type_prop/hswish.cpp
type_prop/idft.cpp
type_prop/interpolate.cpp
type_prop/logical_and.cpp
type_prop/lrn.cpp
type_prop/lstm_cell.cpp
type_prop/lstm_sequence.cpp
@@ -271,6 +272,7 @@ set(SRC
visitors/op/less_equal.cpp
visitors/op/less.cpp
visitors/op/log.cpp
visitors/op/logical_and.cpp
visitors/op/logical_or.cpp
visitors/op/logical_xor.cpp
visitors/op/lrn.cpp
@@ -452,7 +454,6 @@ set(MULTI_TEST_SRC
backend/interpolate.in.cpp
backend/log.in.cpp
backend/log_softmax.in.cpp
backend/logical_and.in.cpp
backend/logical_not.in.cpp
backend/logical_or.in.cpp
backend/logical_xor.in.cpp


@@ -1,34 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, logical_and)
{
Shape shape{3, 4};
auto A = make_shared<op::Parameter>(element::boolean, shape);
auto B = make_shared<op::Parameter>(element::boolean, shape);
auto f =
make_shared<Function>(std::make_shared<op::v1::LogicalAnd>(A, B), ParameterVector{A, B});
std::vector<bool> a{true, true, true, true, true, false, true, false, false, true, true, true};
std::vector<bool> b{true, true, true, true, true, false, true, false, false, true, true, false};
auto test_case_1 = test::TestCase<TestEngine>(f);
test_case_1.add_multiple_inputs<bool>({a, b});
test_case_1.add_expected_output<float>(shape, {1., 1., 1., 1., 1., 0., 1., 0., 0., 1., 1., 0.});
test_case_1.run();
}


@@ -0,0 +1,72 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
namespace {
void incorrect_init(const ngraph::element::Type& type, const std::string& err, const Shape& shape1 = {1, 3, 6}, const Shape& shape2 = {1, 3, 6}) {
auto input1 = make_shared<op::Parameter>(type, shape1);
auto input2 = make_shared<op::Parameter>(type, shape2);
try
{
auto logical_and = make_shared<op::v1::LogicalAnd>(input1, input2);
}
catch (const NodeValidationFailure& error)
{
EXPECT_HAS_SUBSTRING(error.what(), err);
}
}
}
TEST(type_prop, logical_and_incorrect_type_f32)
{
incorrect_init(element::f32, "Operands for logical operators must have boolean element type but have element type f32");
}
TEST(type_prop, logical_and_incorrect_type_f64)
{
incorrect_init(element::f64, "Operands for logical operators must have boolean element type but have element type f64");
}
TEST(type_prop, logical_and_incorrect_type_i32)
{
incorrect_init(element::i32, "Operands for logical operators must have boolean element type but have element type i32");
}
TEST(type_prop, logical_and_incorrect_type_i64)
{
incorrect_init(element::i64, "Operands for logical operators must have boolean element type but have element type i64");
}
TEST(type_prop, logical_and_incorrect_type_u32)
{
incorrect_init(element::u32, "Operands for logical operators must have boolean element type but have element type u32");
}
TEST(type_prop, logical_and_incorrect_type_u64)
{
incorrect_init(element::u64, "Operands for logical operators must have boolean element type but have element type u64");
}
TEST(type_prop, logical_and_incorrect_shape)
{
incorrect_init(element::boolean, "Argument shapes are inconsistent", Shape {1, 3, 6}, Shape {1, 2, 3});
}
TEST(type_prop, logical_and_broadcast)
{
auto input1 = make_shared<op::Parameter>(element::boolean, Shape{1, 1, 6});
auto input2 = make_shared<op::Parameter>(element::boolean, Shape{1, 3, 1});
auto logical_and = make_shared<op::v1::LogicalAnd>(input1, input2);
ASSERT_EQ(logical_and->get_element_type(), element::boolean);
ASSERT_EQ(logical_and->get_shape(), (Shape{1, 3, 6}));
}


@@ -0,0 +1,13 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "binary_ops.hpp"
#include "ngraph/opsets/opset1.hpp"
using Type = ::testing::Types<BinaryOperatorType<ngraph::opset1::LogicalAnd, ngraph::element::boolean>>;
INSTANTIATE_TYPED_TEST_SUITE_P(visitor_with_auto_broadcast,
BinaryOperatorVisitor,
Type,
BinaryOperatorTypeName);
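
The typed-test instantiation above checks that the op's single auto_broadcast attribute survives an attribute-visitor round trip, and that exactly one attribute is visited. Spelled out by hand, the equivalent check would look roughly like this sketch, assuming the NodeBuilder helper from the ngraph visitor-test utilities:

#include "gtest/gtest.h"
#include "ngraph/opsets/opset1.hpp"
#include "util/visitor.hpp"

using namespace ngraph;
using ngraph::test::NodeBuilder;

TEST(attributes, logical_and_sketch) {
    NodeBuilder::get_ops().register_factory<opset1::LogicalAnd>();
    auto a = std::make_shared<op::Parameter>(element::boolean, Shape{2, 2});
    auto b = std::make_shared<op::Parameter>(element::boolean, Shape{2, 2});
    auto logical_and = std::make_shared<opset1::LogicalAnd>(a, b);

    NodeBuilder builder(logical_and);
    auto g_op = as_type_ptr<opset1::LogicalAnd>(builder.create());
    // LogicalAnd declares exactly one attribute (auto_broadcast) ...
    EXPECT_EQ(builder.get_value_map_size(), 1);
    // ... and it must round-trip through the visitor unchanged.
    EXPECT_EQ(g_op->get_autob().m_type, logical_and->get_autob().m_type);
}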