Implement Einsum reference in nGraph interpreter (#5923)

* Implement Einsum reference in nGraph interpreter

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Apply code-style patch and fix build issues

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix CI build

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Fix fast build and apply code review feedback

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Add visitor API tests, check for input type and reduce memory consumption

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Apply code style

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>

* Add visitor API tests

Signed-off-by: Roman Kazantsev <roman.kazantsev@intel.com>
Roman Kazantsev 2021-06-04 11:08:30 +03:00 committed by GitHub
parent 291a80c84a
commit 5e8d1ccf92
11 changed files with 1615 additions and 18 deletions


@@ -0,0 +1,23 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <algorithm>

#include <ngraph/opsets/opset7.hpp>
#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            void einsum(const HostTensorVector& outputs,
                        const HostTensorVector& inputs,
                        const std::string& equation);
        } // namespace reference
    } // namespace runtime
} // namespace ngraph
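The kernel takes the output and input tensors plus the raw equation string; all equation parsing happens inside the reference implementation. A minimal call-site sketch (the HostTensor handling below is illustrative and not part of this diff):

#include <memory>
#include <vector>
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/einsum.hpp"

using namespace ngraph;

// Sketch: contract a 2x3 with a 3x2 matrix ("ab,bc->ac") via the reference kernel.
void einsum_call_site_sketch()
{
    auto a = std::make_shared<runtime::HostTensor>(element::f32, Shape{2, 3});
    auto b = std::make_shared<runtime::HostTensor>(element::f32, Shape{3, 2});
    std::vector<float> a_data{1, 2, 3, 4, 5, 6};
    std::vector<float> b_data{1, 2, 3, 4, 5, 6};
    a->write(a_data.data(), a_data.size() * sizeof(float));
    b->write(b_data.data(), b_data.size() * sizeof(float));

    auto out = std::make_shared<runtime::HostTensor>(element::f32, Shape{2, 2});
    runtime::reference::einsum({out}, {a, b}, "ab,bc->ac");
    // out now holds {22, 28, 49, 64}, matching the matmul test below.
}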


@@ -56,15 +56,7 @@ namespace ngraph
                     }
                 }

-                std::vector<size_t> get_transpose_order(const Shape& input_shape)
-                {
-                    size_t rank = input_shape.size();
-                    NGRAPH_CHECK(rank > 1, "Invalid input for transpose");
-                    std::vector<size_t> axes_order(rank);
-                    std::iota(axes_order.begin(), axes_order.end(), 0);
-                    std::swap(axes_order[rank - 1], axes_order[rank - 2]);
-                    return axes_order;
-                }
+                std::vector<size_t> get_transpose_order(const Shape& input_shape);
             } // namespace details

             /// \brief Reference kernel for matmul computation.
             ///


@@ -9,7 +9,6 @@
 #include <numeric>
 #include <vector>

-#include "ngraph/runtime/opt_kernel/reshape.hpp"
 #include "ngraph/shape.hpp"

 namespace ngraph
@@ -23,14 +22,7 @@ namespace ngraph
                            const Shape& data_shape,
                            size_t element_size,
                            const int64_t* axes_order,
-                           Shape out_shape)
-            {
-                // To reuse opt_kernel::reshape axes order vector has to be converted to AxisVector
-                // Negative axes are not supported, it is validated by transpose evaluate method
-                std::vector<size_t> axis_vector(axes_order, axes_order + data_shape.size());
-                runtime::opt_kernel::reshape(
-                    data, out, data_shape, axis_vector, out_shape, element_size);
-            }
+                           Shape out_shape);
         } // namespace reference
     } // namespace runtime
 } // namespace ngraph

File diff suppressed because it is too large
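The suppressed file is the bulk of the change: the Einsum reference implementation itself, which, judging from the matmul and transpose kernels refactored above, is built on top of those primitives. As a semantics-only illustration of what the tests below expect, here is a brute-force sketch for explicit equations without ellipsis; this is not the committed algorithm, just a standalone, std-only reference for the math:

// naive_einsum.cpp - illustrates einsum semantics ("ab,bc->ac", "kii->ki", ...)
// by enumerating every assignment of the subscript labels.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct NaiveTensor
{
    std::vector<float> data;        // row-major
    std::vector<std::size_t> shape; // one extent per subscript label
};

NaiveTensor naive_einsum(const std::vector<NaiveTensor>& inputs,
                         const std::vector<std::string>& in_subs, // e.g. {"ab", "bc"}
                         const std::string& out_sub)              // e.g. "ac"
{
    // Record the extent of every label; repeated labels (diagonals) must agree.
    std::map<char, std::size_t> extent;
    for (std::size_t i = 0; i < inputs.size(); ++i)
        for (std::size_t d = 0; d < in_subs[i].size(); ++d)
            extent[in_subs[i][d]] = inputs[i].shape[d];

    std::string labels;
    for (const auto& kv : extent)
        labels.push_back(kv.first);

    NaiveTensor out;
    std::size_t out_size = 1;
    for (char c : out_sub)
    {
        out.shape.push_back(extent[c]);
        out_size *= extent[c];
    }
    out.data.assign(out_size, 0.0f);

    // Current assignment of every label, and the row-major offset a subscript
    // string addresses under that assignment.
    std::map<char, std::size_t> value;
    for (char c : labels)
        value[c] = 0;
    auto offset = [&](const std::string& subs) {
        std::size_t off = 0;
        for (char c : subs)
            off = off * extent[c] + value[c];
        return off;
    };

    // Odometer over all label assignments: multiply inputs, accumulate output.
    while (true)
    {
        float prod = 1.0f;
        for (std::size_t i = 0; i < inputs.size(); ++i)
            prod *= inputs[i].data[offset(in_subs[i])];
        out.data[offset(out_sub)] += prod;

        std::size_t i = labels.size();
        while (i-- > 0)
        {
            if (++value[labels[i]] < extent[labels[i]])
                break;
            value[labels[i]] = 0;
            if (i == 0)
                return out; // most significant "digit" rolled over: done
        }
        if (labels.empty())
            return out; // scalar equation such as ",->"
    }
}

For example, naive_einsum({A, B}, {"ab", "bc"}, "ac") reproduces the matrix-multiplication test below, and the enumeration handles reductions ("ab->a") and diagonals ("kii->ki") for free.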


@@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <cmath>
#include <numeric>
#include <utility>
#include <vector>

#include "ngraph/runtime/reference/matmul.hpp"
#include "ngraph/shape_util.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            namespace details
            {
                std::vector<size_t> get_transpose_order(const Shape& input_shape)
                {
                    size_t rank = input_shape.size();
                    NGRAPH_CHECK(rank > 1, "Invalid input for transpose");
                    std::vector<size_t> axes_order(rank);
                    std::iota(axes_order.begin(), axes_order.end(), 0);
                    std::swap(axes_order[rank - 1], axes_order[rank - 2]);
                    return axes_order;
                }
            } // namespace details
        } // namespace reference
    } // namespace runtime
} // namespace ngraph
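The helper returns the identity permutation with the two innermost axes swapped, the axis order matmul needs for its transposed-input modes. A hypothetical check, not part of the diff:

// For a rank-3 shape only the last two axes are exchanged:
ngraph::Shape s{5, 2, 3};
auto order = ngraph::runtime::reference::details::get_transpose_order(s);
// order == {0, 2, 1}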


@@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <cfenv>
#include <cmath>
#include <numeric>
#include <vector>

#include "ngraph/runtime/opt_kernel/reshape.hpp"
#include "ngraph/runtime/reference/transpose.hpp"
#include "ngraph/shape.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace reference
        {
            void transpose(const char* data,
                           char* out,
                           const Shape& data_shape,
                           size_t element_size,
                           const int64_t* axes_order,
                           Shape out_shape)
            {
                // To reuse opt_kernel::reshape axes order vector has to be converted to AxisVector
                // Negative axes are not supported, it is validated by transpose evaluate method
                std::vector<size_t> axis_vector(axes_order, axes_order + data_shape.size());
                runtime::opt_kernel::reshape(
                    data, out, data_shape, axis_vector, out_shape, element_size);
            }
        } // namespace reference
    } // namespace runtime
} // namespace ngraph
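The kernel simply forwards to opt_kernel::reshape after widening the int64_t axes into an AxisVector-compatible buffer. A usage sketch (the buffer handling here is illustrative):

#include <cstdint>
#include <vector>
#include "ngraph/runtime/reference/transpose.hpp"

// Sketch: transpose a row-major 2x3 float buffer with axes order {1, 0}.
void transpose_sketch()
{
    std::vector<float> in{1, 2, 3, 4, 5, 6};
    std::vector<float> out(in.size());
    std::vector<int64_t> axes{1, 0};
    ngraph::runtime::reference::transpose(reinterpret_cast<const char*>(in.data()),
                                          reinterpret_cast<char*>(out.data()),
                                          ngraph::Shape{2, 3},
                                          sizeof(float),
                                          axes.data(),
                                          ngraph::Shape{3, 2});
    // out == {1, 4, 2, 5, 3, 6}
}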


@@ -61,6 +61,7 @@ set(SRC
     op_eval/binary_convolution.cpp
     op_eval/bucketize.cpp
     op_eval/clamp.cpp
+    op_eval/einsum.cpp
     op_eval/floor_mod.cpp
     op_eval/gelu.cpp
     op_eval/hsigmoid.cpp
@@ -221,6 +222,7 @@ set(SRC
     visitors/op/cum_sum.cpp
     visitors/op/deformable_psroi_pooling.cpp
     visitors/op/detection_output.cpp
+    visitors/op/einsum.cpp
     visitors/op/elu.cpp
     visitors/op/extractimagepatches.cpp
     visitors/op/fake_quantize.cpp


@@ -0,0 +1,237 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <string>
#include <vector>

#include "gtest/gtest.h"

#include "ngraph/op/einsum.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/validation_util.hpp"
#include "runtime/backend.hpp"
#include "util/engine/interpreter_engine.hpp"
#include "util/engine/test_engines.hpp"
#include "util/test_case.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";

template <typename T>
static void aux_einsum_test(const std::vector<std::vector<T>>& inputs,
                            const std::vector<Shape>& input_shapes,
                            const std::string& equation,
                            const std::vector<T>& expected_result,
                            const Shape& expected_shape)
{
    NGRAPH_CHECK(inputs.size() == input_shapes.size());

    OutputVector output_vector;
    ParameterVector param_vector;
    for (const auto& input_shape : input_shapes)
    {
        auto param = make_shared<op::Parameter>(element::from<T>(), input_shape);
        output_vector.push_back(param);
        param_vector.push_back(param);
    }

    auto einsum = make_shared<op::v7::Einsum>(output_vector, equation);
    auto fun = make_shared<Function>(OutputVector{einsum}, param_vector);

    auto test_case = test::TestCase<ngraph::test::INTERPRETER_Engine>(fun);
    for (size_t ind = 0; ind < inputs.size(); ++ind)
    {
        test_case.add_input<T>(input_shapes[ind], inputs[ind]);
    }
    test_case.add_expected_output<T>(expected_shape, expected_result);
    test_case.run();
}

TEST(op_eval, einsum_no_reduction)
{
    std::string equation = "ab,cd->abcd";
    std::vector<float> input1{1.0f, 2.0f};
    Shape input1_shape{1, 2};
    std::vector<float> input2{3.0f, 4.0f, 5.0f, 6.0f,
                              7.0f, 8.0f, 9.0f, 10.0f,
                              11.0f, 12.0f, 13.0f, 14.0f};
    Shape input2_shape{3, 4};
    std::vector<float> expected_result{
        3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
        6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f};
    Shape expected_shape{1, 2, 3, 4};
    aux_einsum_test(
        {input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_transpose)
{
    std::string equation = "ijk->kij";
    std::vector<float> input1{1.0f, 2.0f, 3.0f,
                              4.0f, 5.0f, 6.0f};
    Shape input1_shape{1, 2, 3};
    std::vector<float> expected_result{1.0f, 4.0f,
                                       2.0f, 5.0f,
                                       3.0f, 6.0f};
    Shape expected_shape{3, 1, 2};
    aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_reduce)
{
    std::string equation = "ab->a";
    std::vector<float> input1{1.0f, 2.0f, 3.0f,
                              4.0f, 5.0f, 6.0f};
    Shape input1_shape{2, 3};
    std::vector<float> expected_result{6.0f, 15.0f};
    Shape expected_shape{2};
    aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_matrix_multiplication)
{
    std::string equation = "ab,bc->ac";
    std::vector<float> input1{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
    Shape input1_shape{2, 3};
    std::vector<float> input2{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
    Shape input2_shape{3, 2};
    std::vector<float> expected_result{22.0f, 28.0f, 49.0f, 64.0f};
    Shape expected_shape{2, 2};
    aux_einsum_test(
        {input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_multiple_multiplication)
{
    std::string equation = "ab,bcd,bc->ca";
    std::vector<float> input1{1.0, 3.0, 2.0, 7.0,
                              5.0, 6.0, 0.0, 1.0};
    Shape input1_shape{2, 4};
    std::vector<float> input2{1.0, 2.0, 3.0,
                              4.0, 5.0, 6.0,
                              5.0, 7.0, 3.0,
                              7.0, 9.0, 1.0};
    Shape input2_shape{4, 3, 1};
    std::vector<float> input3{4.0, 3.0, 1.0,
                              6.0, 4.0, 2.0,
                              2.0, 5.0, 3.0,
                              1.0, 9.0, 4.0};
    Shape input3_shape{4, 3};
    std::vector<float> expected_result{145.0, 171.0,
                                       703.0, 231.0,
                                       85.0, 91.0};
    Shape expected_shape{3, 2};
    aux_einsum_test({input1, input2, input3},
                    {input1_shape, input2_shape, input3_shape},
                    equation,
                    expected_result,
                    expected_shape);
}
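For einsum_multiple_multiplication, the equation in index notation, with a worked check of the first expected value (an illustration, not part of the test file):

$$\text{out}[c,a] = \sum_{b}\sum_{d} A[a,b]\, B[b,c,d]\, C[b,c],
\qquad
\text{out}[0,0] = 1\cdot 1\cdot 4 + 3\cdot 4\cdot 6 + 2\cdot 5\cdot 2 + 7\cdot 7\cdot 1 = 145.$$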
TEST(op_eval, einsum_ellipsis_one_input_reduction)
{
    std::string equation = "a...->...";
    std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0,
                              3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
    Shape input1_shape{2, 2, 3};
    std::vector<float> expected_result{4.0, 8.0, 4.0, 8.0, 5.0, 13.0};
    Shape expected_shape{2, 3};
    aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_ellipsis_one_input_transpose)
{
    std::string equation = "a...->...a";
    std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0,
                              3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
    Shape input1_shape{2, 2, 3};
    std::vector<float> expected_result{1.0, 3.0, 3.0, 5.0, 2.0, 2.0,
                                       7.0, 1.0, 5.0, 0.0, 6.0, 7.0};
    Shape expected_shape{2, 3, 2};
    aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_ellipsis_mul_by_1dscalar)
{
    std::string equation = "ab...,...->ab...";
    std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0,
                              3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
    Shape input1_shape{2, 2, 3};
    std::vector<float> input2{0.5};
    Shape input2_shape{1};
    std::vector<float> expected_result{0.5, 1.5, 1.0, 3.5, 2.5, 3.0,
                                       1.5, 2.5, 1.0, 0.5, 0.0, 3.5};
    Shape expected_shape{2, 2, 3};
    aux_einsum_test(
        {input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_ellipsis_complex_mul)
{
    std::string equation = "a...j,j...->a...";
    std::vector<float> input1{1.0, 3.0, 2.0, 7.0, 5.0, 6.0, 3.0, 5.0, 2.0, 1.0, 0.0, 7.0};
    Shape input1_shape{1, 1, 4, 3};
    std::vector<float> input2{3.0, 1.0, 6.0, 2.0, 3.0, 10.0, 9.0, 8.0, 2.0, 9.0, 3.0, 2.0,
                              4.0, 2.0, 3.0, 1.0, 9.0, 1.0, 11.0, 4.0, 7.0, 2.0, 3.0, 1.0};
    Shape input2_shape{3, 4, 2, 1};
    std::vector<float> expected_result{27., 85., 37., 66., 30., 58., 50., 8.,
                                       37., 123., 55., 83., 16., 48., 24., 30.,
                                       29., 83., 43., 52., 20., 92., 44., 24.,
                                       24., 96., 48., 30., 13., 67., 31., 15.};
    Shape expected_shape{1, 4, 2, 4};
    aux_einsum_test(
        {input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_diagonal)
{
    std::string equation = "kii->ki";
    std::vector<float> input1{1.0f, 2.0f, 3.0f,
                              4.0f, 5.0f, 6.0f,
                              7.0f, 8.0f, 9.0f};
    Shape input1_shape{1, 3, 3};
    std::vector<float> expected_result{1.0f, 5.0f, 9.0f};
    Shape expected_shape{1, 3};
    aux_einsum_test({input1}, {input1_shape}, equation, expected_result, expected_shape);
}

TEST(op_eval, einsum_diagonal_with_matmul)
{
    std::string equation = "abbac,bad->ad";
    std::vector<float> input1{
        4.0, 2.0, 5.0, 4.0, 5.0, 5.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 2.0, 4.0, 1.0,
        3.0, 4.0, 4.0, 5.0, 1.0, 3.0, 1.0, 3.0, 1.0, 4.0, 3.0, 5.0, 4.0, 4.0, 5.0, 4.0, 4.0,
        5.0, 4.0, 2.0, 2.0, 2.0, 3.0, 3.0, 1.0, 1.0, 4.0, 3.0, 4.0, 2.0, 2.0, 1.0, 1.0, 2.0,
        3.0, 1.0, 1.0, 4.0, 2.0, 3.0, 1.0, 3.0, 4.0, 2.0, 5.0, 5.0, 3.0, 4.0, 3.0, 4.0, 5.0,
        4.0, 4.0, 5.0, 1.0, 3.0, 4.0, 4.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 2.0, 5.0, 4.0,
        4.0, 2.0, 4.0, 4.0, 1.0, 4.0, 4.0, 5.0, 4.0, 4.0, 4.0, 2.0, 3.0, 3.0, 4.0, 2.0, 4.0,
        2.0, 5.0, 1.0, 3.0, 2.0, 4.0, 3.0, 5.0, 1.0, 2.0, 3.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0,
        2.0, 1.0, 4.0, 5.0, 3.0, 4.0, 1.0, 3.0, 3.0, 1.0, 3.0, 2.0, 4.0, 5.0, 1.0, 1.0, 5.0,
        4.0, 5.0, 2.0, 2.0, 3.0, 3.0, 1.0, 2.0, 4.0};
    Shape input1_shape{2, 3, 3, 2, 4};
    std::vector<float> input2{1.0, 4.0, 4.0, 5.0, 3.0, 3.0};
    Shape input2_shape{3, 2, 1};
    std::vector<float> expected_result{123, 129};
    Shape expected_shape{2, 1};
    aux_einsum_test(
        {input1, input2}, {input1_shape, input2_shape}, equation, expected_result, expected_shape);
}


@@ -23,6 +23,7 @@
 #include <ngraph/runtime/reference/deformable_convolution.hpp>
 #include <ngraph/runtime/reference/deformable_psroi_pooling.hpp>
 #include <ngraph/runtime/reference/detection_output.hpp>
+#include <ngraph/runtime/reference/einsum.hpp>
 #include <ngraph/runtime/reference/elu.hpp>
 #include <ngraph/runtime/reference/embedding_bag_offsets_sum.hpp>
 #include <ngraph/runtime/reference/embedding_bag_packed_sum.hpp>
@@ -2460,6 +2461,16 @@ namespace
         return true;
     }

+    template <element::Type_t ET>
+    bool evaluate(const shared_ptr<op::v7::Einsum>& op,
+                  const HostTensorVector& outputs,
+                  const HostTensorVector& inputs)
+    {
+        const auto equation = op->get_equation();
+        runtime::reference::einsum(outputs, inputs, equation);
+        return true;
+    }
+
     template <typename T>
     bool evaluate_node(std::shared_ptr<Node> node,
                        const HostTensorVector& outputs,
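evaluate_node, whose signature appears in the context lines above, is what routes a node to the right template instantiation: it switches on the node's output element type and calls the matching evaluate<ET>. A condensed sketch of that pattern (abridged and illustrative; the real switch covers every supported element type):

// Condensed sketch of the interpreter's per-type dispatch (illustrative).
template <typename T>
bool evaluate_node(std::shared_ptr<Node> node,
                   const HostTensorVector& outputs,
                   const HostTensorVector& inputs)
{
    switch (node->get_output_element_type(0))
    {
    case element::Type_t::f32:
        return evaluate<element::Type_t::f32>(as_type_ptr<T>(node), outputs, inputs);
    case element::Type_t::i32:
        return evaluate<element::Type_t::i32>(as_type_ptr<T>(node), outputs, inputs);
    // ... remaining element types ...
    default: return false;
    }
}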


@@ -92,5 +92,6 @@ NGRAPH_OP(GatherElements, op::v6)
 NGRAPH_OP(MVN, ngraph::op::v6)
 NGRAPH_OP(DFT, op::v7)
+NGRAPH_OP(Einsum, op::v7)
 NGRAPH_OP(IDFT, op::v7)
 NGRAPH_OP(Roll, ngraph::op::v7)


@@ -0,0 +1,28 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/opsets/opset7.hpp"
#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;
using ngraph::test::ValueMap;

TEST(attributes, einsum_v7_op)
{
    NodeBuilder::get_ops().register_factory<opset7::Einsum>();
    auto input1 = make_shared<opset1::Parameter>(element::i32, Shape{2, 3});
    auto input2 = make_shared<opset1::Parameter>(element::i32, Shape{3, 4});
    std::string equation = "ab,bc->ac";
    auto einsum = make_shared<opset7::Einsum>(OutputVector{input1, input2}, equation);
    NodeBuilder builder(einsum);
    auto g_einsum = as_type_ptr<opset7::Einsum>(builder.create());
    EXPECT_EQ(g_einsum->get_equation(), einsum->get_equation());
}