Remove obsoleted v0::Sum op (#2938)
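Code that still constructs the deprecated v0::Sum can move to v1::ReduceSum, which takes its reduction axes as a second (constant) input and an explicit keep_dims flag. A minimal sketch of the replacement, assuming a Parameter named data (illustrative only, not part of this diff):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// v0 (removed): auto sum = std::make_shared<op::v0::Sum>(data, AxisSet{0, 1});
// v1: the axes become an i64 Constant input, and keep_dims is explicit.
auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto axes = op::Constant::create(element::i64, Shape{2}, std::vector<int64_t>{0, 1});
auto sum  = std::make_shared<op::v1::ReduceSum>(data, axes, false);
auto f    = std::make_shared<Function>(NodeVector{sum}, ParameterVector{data});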
@@ -45,14 +45,6 @@ static bool eliminate_nop(const std::shared_ptr<Node>& node) {
return false;
}

static bool eliminate_sum(const std::shared_ptr<Node>& node) {
auto sum = as_type_ptr<op::v0::Sum>(node);
if (sum->get_reduction_axes().empty()) {
return replace_output_update_name(node->output(0), node->input_value(0));
}
return false;
}

static bool eliminate_convert(const std::shared_ptr<Node>& node) {
bool is_out_type_agnostic = false;
static const std::set<NodeTypeInfo> type_agnostic{TI(opset3::NonZero)};
@@ -335,7 +327,6 @@ static bool eliminate_squeeze(const std::shared_ptr<Node>& node) {
bool pass::NopElimination::run_on_function(std::shared_ptr<Function> function) {
static const std::unordered_map<NodeTypeInfo, std::function<bool(const std::shared_ptr<Node>&)>>
dispatcher{{TI(opset3::Pad), &eliminate_nop},
{TI(op::v0::Sum), &eliminate_sum},
{TI(opset3::Convert), &eliminate_convert},
{TI(op::v1::StridedSlice), &eliminate_nop},
{TI(opset3::Reshape), &eliminate_reshape_v1},
@@ -97,7 +97,8 @@ TEST(algebraic_simplification, multiply_prod_negative) {
TEST(algebraic_simplification, multiply_sum_negative) {
auto fconst1 = ngraph::op::Constant::create(element::f64, Shape{2}, {1.0, 1.0});
auto broadcast = builder::opset1::make_broadcast(fconst1, Shape{2, 5}, AxisSet{1});
auto sum_fconst1 = std::make_shared<op::Sum>(broadcast, AxisSet{0, 1});
auto axes = op::Constant::create(element::i64, {2}, {0, 1});
auto sum_fconst1 = std::make_shared<op::v1::ReduceSum>(broadcast, axes);

pass::Manager pass_manager;
pass_manager.register_pass<pass::AlgebraicSimplification>();
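The test above shows the mechanical conversion used throughout this commit: the AxisSet argument of op::Sum becomes an i64 Constant feeding op::v1::ReduceSum. Where an AxisSet is already in hand, it can be wrapped like this (a hypothetical helper, shown only for illustration; the converted tests below inline the same expression):

// Hypothetical convenience helper, not part of this commit.
std::shared_ptr<op::Constant> axes_as_constant(const AxisSet& axes)
{
    return op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector());
}

// Usage mirroring the converted test above:
auto reduction_axes = axes_as_constant(AxisSet{0, 1});
auto reduce = std::make_shared<op::v1::ReduceSum>(broadcast, reduction_axes);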
@@ -26,19 +26,6 @@ NGRAPH_SUPPRESS_DEPRECATED_START
using namespace ngraph;
using namespace std;

TEST(nop_elimination, eliminate_sum) {
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto s = make_shared<op::v0::Sum>(A, AxisSet{});
auto f = make_shared<Function>(make_shared<op::v0::Abs>(s), ParameterVector{A});

pass::Manager pass_manager;
pass_manager.register_pass<pass::NopElimination>();
pass_manager.run_passes(f);

ASSERT_EQ(count_ops_of_type<op::v0::Sum>(f), 0);
}

TEST(nop_elimination, eliminate_convert) {
Shape shape{};
auto type = element::f32;
@@ -25,7 +25,6 @@
#include "ngraph/op/power.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/shape.hpp"
@@ -25,7 +25,6 @@
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "ngraph/util.hpp"
@@ -173,7 +173,6 @@ NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
NGRAPH_OP(StridedSlice, ngraph::op::v1, 1)
NGRAPH_OP(Subtract, ngraph::op::v0, 0)
NGRAPH_OP(Subtract, ngraph::op::v1, 1)
NGRAPH_OP(Sum, ngraph::op::v0, 0)
NGRAPH_OP(Tan, ngraph::op::v0, 0)
NGRAPH_OP(Tanh, ngraph::op::v0, 0)
NGRAPH_OP(TensorIterator, ngraph::op::v0, 0)
@@ -1,113 +0,0 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "ngraph/axis_set.hpp"
|
||||
#include "ngraph/op/op.hpp"
|
||||
#include "ngraph/op/util/arithmetic_reduction.hpp"
|
||||
|
||||
namespace ngraph
|
||||
{
|
||||
namespace op
|
||||
{
|
||||
namespace v0
|
||||
{
|
||||
// clang-format off
|
||||
/// \brief Tensor sum operation.
|
||||
///
|
||||
/// Element-wise sums the input tensor, eliminating the specified reduction axes.
|
||||
/// For example:
|
||||
///
|
||||
/// \f[
|
||||
/// \mathit{sum}\left(\{0\},
|
||||
/// \left[ \begin{array}{ccc}
|
||||
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
|
||||
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
|
||||
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
|
||||
/// \f]
|
||||
///
|
||||
/// \f[
|
||||
/// \mathit{sum}\left(\{1\},
|
||||
/// \left[ \begin{array}{ccc}
|
||||
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
|
||||
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
|
||||
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
|
||||
/// \f]
|
||||
///
|
||||
/// \f[
|
||||
/// \mathit{sum}\left(\{0,1\},
|
||||
/// \left[ \begin{array}{ccc}
|
||||
/// 1 & 2 \\ 3 & 4 \\ 5 & 6 \end{array} \right]\right) =
|
||||
/// (1 + 2) + (3 + 4) + (5 + 6) =
|
||||
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
|
||||
/// \f]
|
||||
///
|
||||
/// ## Parameters
|
||||
///
|
||||
/// | | Description |
|
||||
/// | -------------------- | ---------------------------------------- |
|
||||
/// | `reduction_axes` | The axes to eliminate through summation. |
|
||||
///
|
||||
/// ## Inputs
|
||||
///
|
||||
/// | | Type | Description |
|
||||
/// | ----- | --------------------------------- | ------------------------------------------------------ |
|
||||
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
|
||||
///
|
||||
/// ## Output
|
||||
///
|
||||
/// | Type | Description |
|
||||
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
|
||||
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
|
||||
// clang-format off
|
||||
class NGRAPH_DEPRECATED("This operation is deprecated and will be removed soon. "
|
||||
"Use v1::ReduceSum instead of it.") NGRAPH_API Sum : public util::ArithmeticReduction
|
||||
{
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
public:
|
||||
static constexpr NodeTypeInfo type_info{ "Sum", 0 };
|
||||
const NodeTypeInfo& get_type_info() const override { return type_info; }
|
||||
/// \brief Constructs a summation operation.
|
||||
Sum() = default;
|
||||
/// \brief Constructs a summation operation.
|
||||
///
|
||||
/// \param arg The tensor to be summed.
|
||||
/// \param reduction_axes The axis positions (0-based) to be eliminated.
|
||||
Sum(const Output<Node>& arg, const AxisSet& reduction_axes);
|
||||
/// \brief Constructs a summation operation.
|
||||
///
|
||||
/// \param arg The tensor to be summed.
|
||||
/// \param reduction_axes The axis positions (0-based) to be eliminated.
|
||||
Sum(const Output<Node>& arg, const Output<Node>& reduction_axes);
|
||||
|
||||
virtual std::shared_ptr<Node>
|
||||
clone_with_new_inputs(const OutputVector& new_args) const override;
|
||||
|
||||
/// \return The default value for Sum.
|
||||
virtual std::shared_ptr<Node> get_default_value() const override;
|
||||
|
||||
bool evaluate(const HostTensorVector& outputs,
|
||||
const HostTensorVector& inputs) const override;
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
};
|
||||
}
|
||||
// default opset version
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
using v0::Sum;
|
||||
NGRAPH_SUPPRESS_DEPRECATED_END
|
||||
}
|
||||
}
|
||||
@@ -155,7 +155,6 @@
#include "ngraph/op/squeeze.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/swish.hpp"
#include "ngraph/op/tan.hpp"
#include "ngraph/op/tanh.hpp"
@@ -19,7 +19,6 @@
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/partial_shape.hpp"

#include <numeric>
@@ -25,7 +25,6 @@
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/runtime/reference/softmax.hpp"
#include "ngraph/util.hpp"
@@ -1,99 +0,0 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#include "ngraph/op/sum.hpp"
|
||||
#include "itt.hpp"
|
||||
#include "ngraph/graph_util.hpp"
|
||||
#include "ngraph/op/broadcast.hpp"
|
||||
#include "ngraph/runtime/host_tensor.hpp"
|
||||
#include "ngraph/runtime/reference/sum.hpp"
|
||||
#include "ngraph/shape_util.hpp"
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
constexpr NodeTypeInfo op::v0::Sum::type_info;
|
||||
|
||||
op::v0::Sum::Sum(const Output<Node>& arg, const AxisSet& reduction_axes)
|
||||
: ArithmeticReduction(arg, reduction_axes)
|
||||
{
|
||||
constructor_validate_and_infer_types();
|
||||
}
|
||||
|
||||
op::v0::Sum::Sum(const Output<Node>& arg, const Output<Node>& reduction_axes)
|
||||
: ArithmeticReduction(arg, reduction_axes)
|
||||
{
|
||||
constructor_validate_and_infer_types();
|
||||
}
|
||||
|
||||
shared_ptr<Node> op::Sum::clone_with_new_inputs(const OutputVector& new_args) const
|
||||
{
|
||||
check_new_args_count(this, new_args);
|
||||
return make_shared<op::v0::Sum>(new_args.at(0), new_args.at(1));
|
||||
}
|
||||
|
||||
shared_ptr<Node> op::v0::Sum::get_default_value() const
|
||||
{
|
||||
return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
|
||||
}
|
||||
|
||||
namespace sum
|
||||
{
|
||||
template <element::Type_t ET>
|
||||
bool evaluate(const HostTensorPtr& arg,
|
||||
const HostTensorPtr& out,
|
||||
const AxisSet& axes,
|
||||
bool keep_dims)
|
||||
{
|
||||
out->set_shape(reduce(arg->get_shape(), axes, false));
|
||||
runtime::reference::sum(
|
||||
arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool evaluate_sum(const HostTensorPtr& arg,
|
||||
const HostTensorPtr& out,
|
||||
const AxisSet& axes,
|
||||
bool keep_dims)
|
||||
{
|
||||
bool rc = true;
|
||||
switch (arg->get_element_type())
|
||||
{
|
||||
TYPE_CASE(i32)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
TYPE_CASE(i64)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
TYPE_CASE(u32)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
TYPE_CASE(u64)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
TYPE_CASE(f16)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
TYPE_CASE(f32)(arg, out, axes, keep_dims);
|
||||
break;
|
||||
default: rc = false; break;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
bool op::v0::Sum::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
|
||||
{
|
||||
OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Sum::evaluate");
|
||||
return sum::evaluate_sum(inputs[0], outputs[0], get_reduction_axes(), false);
|
||||
}
|
||||
@@ -19,7 +19,6 @@
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/partial_shape.hpp"
@@ -22,7 +22,6 @@
#include "ngraph/op/reduce_mean.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/runtime/reference/max.hpp"
#include "ngraph/runtime/reference/mean.hpp"
#include "ngraph/runtime/reference/min.hpp"
@@ -66,14 +65,6 @@ static shared_ptr<op::Constant>
reduce_prod->get_reduction_axes(),
reduce_prod->get_keep_dims());
}
else if (auto sum = as_type_ptr<op::Sum>(reduction_node))
{
runtime::reference::sum<T>(constant->get_data_ptr<T>(),
data_ptr,
constant->get_output_shape(0),
sum->get_reduction_axes(),
false);
}
else if (auto reduce_sum = as_type_ptr<op::v1::ReduceSum>(reduction_node))
{
runtime::reference::sum<T>(constant->get_data_ptr<T>(),
@@ -159,7 +150,7 @@ void pass::ConstantFolding::construct_constant_arithmetic_reduction()
auto constant_axes_label =
make_shared<pattern::op::Label>(element::i64, Shape{2}, pattern::has_class<op::Constant>());
auto is_supported_reduction = [](std::shared_ptr<Node> n) {
return (pattern::has_class<op::Sum>()(n) || pattern::has_class<op::v1::ReduceMax>()(n) ||
return (pattern::has_class<op::v1::ReduceMax>()(n) ||
pattern::has_class<op::v1::ReduceMin>()(n) ||
pattern::has_class<op::v1::ReduceProd>()(n) ||
pattern::has_class<op::v1::ReduceSum>()(n) ||
@@ -178,7 +178,6 @@ set(SRC
type_prop/split.cpp
type_prop/squared_difference.cpp
type_prop/squeeze.cpp
type_prop/sum.cpp
type_prop/swish.cpp
type_prop/reduce_prod.cpp
type_prop/reduce_sum.cpp
@@ -339,7 +338,6 @@ set(MULTI_TEST_SRC
backend/split.in.cpp
backend/sqrt.in.cpp
backend/subtract.in.cpp
backend/sum.in.cpp
backend/tan.in.cpp
backend/tanh.in.cpp
backend/tile.in.cpp
@@ -1,785 +0,0 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#include <algorithm>
|
||||
#include <cinttypes>
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
#include <numeric>
|
||||
#include <random>
|
||||
#include <string>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "ngraph/runtime/tensor.hpp"
|
||||
#include "runtime/backend.hpp"
|
||||
#include "util/all_close.hpp"
|
||||
#include "util/all_close_f.hpp"
|
||||
#include "util/ndarray.hpp"
|
||||
#include "util/random.hpp"
|
||||
#include "util/test_control.hpp"
|
||||
#include "util/test_tools.hpp"
|
||||
|
||||
static std::mt19937_64 random_generator;
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
static string s_manifest = "${MANIFEST}";
|
||||
|
||||
// Trivial case with no summed axes.
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_trivial)
|
||||
{
|
||||
Shape shape{2, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4});
|
||||
auto result = backend->create_tensor(element::f32, shape);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(result)));
|
||||
}
|
||||
|
||||
// Failure has been reported at 5D for some reason
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_trivial_5d)
|
||||
{
|
||||
Shape shape{2, 2, 2, 2, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape);
|
||||
copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
|
||||
auto result = backend->create_tensor(element::f32, shape);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
|
||||
read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_to_scalar)
|
||||
{
|
||||
Shape shape{2, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4});
|
||||
auto result = backend->create_tensor(element::f32, Shape{});
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{10}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_large_1d_to_scalar)
|
||||
{
|
||||
Shape shape{1000000};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
random_generator.seed(2);
|
||||
vector<float> v_a(1000000, 0);
|
||||
double r = 0;
|
||||
for (int i = 0; i < 1000000; i++)
|
||||
{
|
||||
v_a[i] = static_cast<float>(random_generator() % 255);
|
||||
r += static_cast<double>(v_a[i]);
|
||||
}
|
||||
auto a = backend->create_tensor(element::f32, shape);
|
||||
copy_data(a, v_a);
|
||||
auto result = backend->create_tensor(element::f32, Shape{});
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
|
||||
EXPECT_TRUE(
|
||||
test::all_close_f(vector<float>{static_cast<float>(r)}, read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_columns)
|
||||
{
|
||||
Shape shape_a{3, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{2};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{9, 12}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_6d)
|
||||
{
|
||||
Shape shape_a{2, 6, 4, 5, 7, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{2, 4, 5, 3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1, 4}), ParameterVector{A});
|
||||
|
||||
auto backend_wrk = runtime::Backend::create("${BACKEND_NAME}");
|
||||
auto backend_ref = runtime::Backend::create("INTERPRETER");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a_wrk = backend_wrk->create_tensor(element::f32, shape_a);
|
||||
auto a_ref = backend_ref->create_tensor(element::f32, shape_a);
|
||||
auto result_wrk = backend_wrk->create_tensor(element::f32, shape_rt);
|
||||
auto result_ref = backend_ref->create_tensor(element::f32, shape_rt);
|
||||
|
||||
vector<float> inp_data(shape_size<const Shape>(shape_a));
|
||||
iota(inp_data.begin(), inp_data.end(), 1.f);
|
||||
copy_data(a_wrk, inp_data);
|
||||
copy_data(a_ref, inp_data);
|
||||
|
||||
auto handle_wrk = backend_wrk->compile(f);
|
||||
auto handle_ref = backend_ref->compile(f);
|
||||
handle_wrk->call_with_validate({result_wrk}, {a_wrk});
|
||||
handle_ref->call_with_validate({result_ref}, {a_ref});
|
||||
|
||||
EXPECT_TRUE(test::all_close_f(read_vector<float>(result_ref), read_vector<float>(result_wrk)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_rows)
|
||||
{
|
||||
Shape shape_a{3, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{3, 7, 11}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_rows_zero)
|
||||
{
|
||||
Shape shape_a{3, 0};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
copy_data(result, vector<float>({3, 3, 3}));
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_cols_zero)
|
||||
{
|
||||
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
|
||||
Shape shape_a{0, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{2};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
copy_data(result, vector<float>({3, 3}));
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_vector_zero)
|
||||
{
|
||||
Shape shape_a{0};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
copy_data(result, vector<float>({3}));
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_matrix_to_scalar_zero_by_zero)
|
||||
{
|
||||
Shape shape_a{0, 0};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
copy_data(result, vector<float>({3}));
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
|
||||
|
||||
// For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
|
||||
// input tensors, so let's do this too.
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_matrix_most_sig)
|
||||
{
|
||||
Shape shape_a{3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3, 3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1 + 10 + 19,
|
||||
2 + 11 + 20,
|
||||
3 + 12 + 21,
|
||||
4 + 13 + 22,
|
||||
5 + 14 + 23,
|
||||
6 + 15 + 24,
|
||||
7 + 16 + 25,
|
||||
8 + 17 + 26,
|
||||
9 + 18 + 27}),
|
||||
read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_matrix_least_sig)
|
||||
{
|
||||
Shape shape_a{3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3, 3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{2}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1 + 2 + 3,
|
||||
4 + 5 + 6,
|
||||
7 + 8 + 9,
|
||||
10 + 11 + 12,
|
||||
13 + 14 + 15,
|
||||
16 + 17 + 18,
|
||||
19 + 20 + 21,
|
||||
22 + 23 + 24,
|
||||
25 + 26 + 27}),
|
||||
read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_vector)
|
||||
{
|
||||
Shape shape_a{3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{1 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25,
|
||||
2 + 11 + 20 + 5 + 14 + 23 + 8 + 17 + 26,
|
||||
3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
|
||||
read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_scalar)
|
||||
{
|
||||
Shape shape_a{3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f(
|
||||
(vector<float>{1 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25 + 2 + 11 + 20 + 5 + 14 + 23 + 8 +
|
||||
17 + 26 + 3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
|
||||
read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_to_scalar_int32)
|
||||
{
|
||||
Shape shape_a{3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::i32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::i32, shape_a);
|
||||
copy_data(a, vector<int32_t>{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14,
|
||||
23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27});
|
||||
auto result = backend->create_tensor(element::i32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_EQ((vector<int32_t>{0x40000001 + 10 + 19 + 4 + 13 + 22 + 7 + 16 + 25 + 2 + 11 + 20 + 5 +
|
||||
14 + 23 + 8 + 17 + 26 + 3 + 12 + 21 + 6 + 15 + 24 + 9 + 18 + 27}),
|
||||
read_vector<int32_t>(result));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_eliminate_zero_dim)
|
||||
{
|
||||
Shape shape_a{3, 0, 2};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{3, 2};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, vector<float>{});
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
|
||||
// right value.
|
||||
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0, 0, 0}), read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_3d_eliminate_zero_dim_int32)
|
||||
{
|
||||
Shape shape_a{3, 0, 2};
|
||||
auto A = make_shared<op::Parameter>(element::i32, shape_a);
|
||||
Shape shape_rt{3, 2};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::i32, shape_a);
|
||||
copy_data(a, vector<int32_t>{});
|
||||
auto result = backend->create_tensor(element::i32, shape_rt);
|
||||
|
||||
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
|
||||
// right value.
|
||||
copy_data(result, vector<int32_t>{2112, 2112, 2112, 2112, 2112, 2112});
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_EQ((vector<int32_t>{0, 0, 0, 0, 0, 0}), read_vector<int32_t>(result));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_5d_to_scalar)
|
||||
{
|
||||
Shape shape_a{3, 3, 3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f =
|
||||
make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2, 3, 4}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape_a);
|
||||
copy_data(a, std::vector<float>(std::pow(3, 5), 1));
|
||||
auto result = backend->create_tensor(element::f32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f(std::vector<float>{243.}, read_vector<float>(result)));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_5d_to_scalar_int32)
|
||||
{
|
||||
Shape shape_a{3, 3, 3, 3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::i32, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f =
|
||||
make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1, 2, 3, 4}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::i32, shape_a);
|
||||
copy_data(a, std::vector<int32_t>(std::pow(3, 5), 1));
|
||||
auto result = backend->create_tensor(element::i32, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_EQ(std::vector<int32_t>{243}, read_vector<int32_t>(result));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_2d_to_scalar_int8)
|
||||
{
|
||||
Shape shape_a{3, 3};
|
||||
auto A = make_shared<op::Parameter>(element::i8, shape_a);
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0, 1}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::i8, shape_a);
|
||||
copy_data(a, std::vector<int8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
|
||||
auto result = backend->create_tensor(element::i8, shape_rt);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_EQ(std::vector<int8_t>{45}, read_vector<int8_t>(result));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_trivial_in_double)
|
||||
{
|
||||
Shape shape{4, 3};
|
||||
Shape rshape{3};
|
||||
auto A = make_shared<op::Parameter>(element::f64, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f64, shape);
|
||||
copy_data(a, vector<double>{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7});
|
||||
auto result = backend->create_tensor(element::f64, rshape);
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
EXPECT_TRUE(test::all_close_f((vector<double>{30, 22, 26}), read_vector<double>(result)));
|
||||
}
|
||||
|
||||
#if NGRAPH_INTERPRETER_ENABLE
|
||||
|
||||
#ifndef _WIN32
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_stable_acc)
|
||||
{
|
||||
std::string backend_name = "${BACKEND_NAME}";
|
||||
if (backend_name == "INTERPRETER")
|
||||
{
|
||||
return;
|
||||
}
|
||||
Shape shape_a{10, 10, 10, 30};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
|
||||
Shape shape_rt{10};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1, 2, 3}), ParameterVector{A});
|
||||
|
||||
test::Uniform<float> rng(1000.0f, 1000.1f, 2112);
|
||||
vector<vector<float>> args;
|
||||
for (shared_ptr<op::Parameter> param : f->get_parameters())
|
||||
{
|
||||
vector<float> tensor_val(shape_size(param->get_shape()));
|
||||
rng.initialize(tensor_val);
|
||||
args.push_back(tensor_val);
|
||||
}
|
||||
|
||||
auto ref_func = clone_function(*f);
|
||||
auto bk_func = clone_function(*f);
|
||||
|
||||
auto ref_results = execute(ref_func, args, "INTERPRETER");
|
||||
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
|
||||
|
||||
EXPECT_TRUE(
|
||||
test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS + 1));
|
||||
}
|
||||
#endif
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_stable_acc_double)
|
||||
{
|
||||
std::string backend_name = "${BACKEND_NAME}";
|
||||
if (backend_name == "INTERPRETER")
|
||||
{
|
||||
return;
|
||||
}
|
||||
Shape shape_a{10, 10, 20, 300};
|
||||
auto A = make_shared<op::Parameter>(element::f64, shape_a);
|
||||
|
||||
Shape shape_rt{10};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1, 2, 3}), ParameterVector{A});
|
||||
|
||||
test::Uniform<double> rng(1000000000.0L, 1000000000.001L, 2112);
|
||||
vector<vector<double>> args;
|
||||
for (shared_ptr<op::Parameter> param : f->get_parameters())
|
||||
{
|
||||
vector<double> tensor_val(shape_size(param->get_shape()));
|
||||
rng.initialize(tensor_val);
|
||||
args.push_back(tensor_val);
|
||||
}
|
||||
|
||||
auto ref_func = clone_function(*f);
|
||||
auto bk_func = clone_function(*f);
|
||||
|
||||
auto ref_results = execute(ref_func, args, "INTERPRETER");
|
||||
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
|
||||
|
||||
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 1e-5));
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_stable_simple_float)
|
||||
{
|
||||
std::string backend_name = "${BACKEND_NAME}";
|
||||
if (backend_name == "INTERPRETER")
|
||||
{
|
||||
return;
|
||||
}
|
||||
Shape shape_a{20};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape_a);
|
||||
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
vector<vector<float>> args;
|
||||
args.push_back(vector<float>{10000000.0f, 0.9f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f,
|
||||
0.8f, 0.1f, 0.9f, 0.5f, 0.2f, 0.3f, 0.4f,
|
||||
0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 0.1f});
|
||||
|
||||
auto ref_func = clone_function(*f);
|
||||
auto bk_func = clone_function(*f);
|
||||
|
||||
auto ref_results = execute(ref_func, args, "INTERPRETER");
|
||||
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
|
||||
|
||||
EXPECT_TRUE(
|
||||
test::all_close_f(ref_results.at(0), bk_results.at(0), DEFAULT_FLOAT_TOLERANCE_BITS - 1));
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_stable_simple_double)
|
||||
{
|
||||
std::string backend_name = "${BACKEND_NAME}";
|
||||
if (backend_name == "INTERPRETER")
|
||||
{
|
||||
return;
|
||||
}
|
||||
Shape shape_a{20};
|
||||
auto A = make_shared<op::Parameter>(element::f64, shape_a);
|
||||
|
||||
Shape shape_rt{};
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{0}), ParameterVector{A});
|
||||
|
||||
vector<vector<double>> args;
|
||||
args.push_back(vector<double>{10000000000000000.0L,
|
||||
0.2L,
|
||||
0.3L,
|
||||
0.4L,
|
||||
0.5L,
|
||||
0.6L,
|
||||
0.7L,
|
||||
0.8L,
|
||||
0.9L,
|
||||
0.7L,
|
||||
0.9L,
|
||||
0.7L,
|
||||
0.3L,
|
||||
0.6L,
|
||||
0.8L,
|
||||
0.4L,
|
||||
0.6L,
|
||||
0.5L,
|
||||
0.8L,
|
||||
0.7L});
|
||||
|
||||
auto ref_func = clone_function(*f);
|
||||
auto bk_func = clone_function(*f);
|
||||
|
||||
auto ref_results = execute(ref_func, args, "INTERPRETER");
|
||||
auto bk_results = execute(bk_func, args, "${BACKEND_NAME}");
|
||||
|
||||
EXPECT_TRUE(test::all_close(ref_results.at(0), bk_results.at(0), 0.0, 2.0));
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_dynamic)
|
||||
{
|
||||
// Create a graph for f(x,axes:int32) = Sum(x,Convert<int64>(axes)).
|
||||
auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
|
||||
auto axes = make_shared<op::Parameter>(element::i32, PartialShape{Dimension::dynamic()});
|
||||
auto axes_i64 = make_shared<op::Convert>(axes, element::i64);
|
||||
|
||||
auto sum = make_shared<op::Sum>(x, axes_i64);
|
||||
ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
|
||||
|
||||
auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x, axes});
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
|
||||
|
||||
auto ex = backend->compile(f);
|
||||
|
||||
auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
|
||||
|
||||
std::vector<Shape> x_shapes{
|
||||
Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}};
|
||||
std::vector<std::vector<int32_t>> axeses{{}, {0}, {1}, {0, 1}, {}, {0}};
|
||||
std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6},
|
||||
{1, 2, 3, 4, 5, 6},
|
||||
{1, 2, 3, 4, 5, 6},
|
||||
{1, 2, 3, 4, 5, 6},
|
||||
{1, 2, 3, 4, 5},
|
||||
{1, 2, 3, 4, 5}};
|
||||
std::vector<Shape> expected_result_shapes{
|
||||
Shape{2, 3}, Shape{3}, Shape{2}, Shape{}, Shape{5}, Shape{}};
|
||||
std::vector<std::vector<float>> expected_results{
|
||||
{1, 2, 3, 4, 5, 6}, {5, 7, 9}, {6, 15}, {21}, {1, 2, 3, 4, 5}, {15}};
|
||||
|
||||
for (size_t i = 0; i < x_shapes.size(); i++)
|
||||
{
|
||||
auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
|
||||
auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()});
|
||||
|
||||
copy_data(t_x, inputs[i]);
|
||||
copy_data(t_axes, axeses[i]);
|
||||
|
||||
ex->call_with_validate({t_r}, {t_x, t_axes});
|
||||
|
||||
ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
|
||||
|
||||
auto results = read_vector<float>(t_r);
|
||||
|
||||
ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
|
||||
}
|
||||
}
|
||||
|
||||
NGRAPH_TEST(${BACKEND_NAME}, sum_inf)
|
||||
{
|
||||
Shape shape{7, 4};
|
||||
auto A = make_shared<op::Parameter>(element::f32, shape);
|
||||
auto f = make_shared<Function>(make_shared<op::Sum>(A, AxisSet{1}), ParameterVector{A});
|
||||
|
||||
auto infi = std::numeric_limits<float>::infinity();
|
||||
|
||||
auto backend = runtime::Backend::create("${BACKEND_NAME}");
|
||||
|
||||
// Create some tensors for input/output
|
||||
auto a = backend->create_tensor(element::f32, shape);
|
||||
copy_data(a,
|
||||
test::NDArray<float, 2>({{-infi, 0, 0, infi},
|
||||
{infi, 100, -100, -infi},
|
||||
{infi, 0, 100, infi},
|
||||
{-infi, -100, 0, -infi},
|
||||
{infi, infi, infi, infi},
|
||||
{infi, infi, infi, -infi},
|
||||
{infi, std::nanf(""), 42, infi}})
|
||||
.get_vector());
|
||||
auto result = backend->create_tensor(element::f32, Shape{7});
|
||||
|
||||
auto handle = backend->compile(f);
|
||||
handle->call_with_validate({result}, {a});
|
||||
auto r = read_vector<float>(result);
|
||||
ASSERT_EQ(r.size(), 7);
|
||||
EXPECT_TRUE(isnan(r[0]));
|
||||
EXPECT_TRUE(isnan(r[1]));
|
||||
EXPECT_TRUE(r[2] > 0 && isinf(r[2]));
|
||||
EXPECT_TRUE(r[3] < 0 && isinf(r[3]));
|
||||
EXPECT_TRUE(r[4] > 0 && isinf(r[4]));
|
||||
EXPECT_TRUE(isnan(r[5]));
|
||||
EXPECT_TRUE(isnan(r[6]));
|
||||
}
|
||||
@@ -857,34 +857,6 @@ TEST(constant_folding, const_reduceprod_keepdims)
ASSERT_EQ(values_expected, values_out);
}

TEST(constant_folding, const_sum)
{
Shape input_shape{3, 3};

vector<int32_t> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9};
auto constant = op::Constant::create(element::i32, input_shape, values_in);
auto convert = make_shared<op::Sum>(constant, AxisSet{1});
convert->set_friendly_name("test");
auto f = make_shared<Function>(convert, ParameterVector{});

pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);

ASSERT_EQ(count_ops_of_type<op::Sum>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);

auto new_const =
as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(new_const);
ASSERT_EQ(new_const->get_friendly_name(), "test");
auto values_out = new_const->get_vector<int32_t>();

vector<int32_t> values_expected{6, 15, 24};

ASSERT_EQ(values_expected, values_out);
}

TEST(constant_folding, const_reducesum)
{
Shape input_shape{3, 3};
@@ -268,6 +268,26 @@ TEST(copy, power)
ASSERT_TRUE(check_binary<op::Power>());
}

TEST(copy, reduce_sum)
{
Shape shape{4, 3};
AxisSet axes{1};
auto arg0 = make_shared<op::Parameter>(element::f32, shape);

auto axes_node = op::Constant::create(element::i64, {axes.size()}, axes.to_vector());
auto node = make_shared<op::v1::ReduceSum>(arg0, axes_node, true);
OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
op::Constant::create(element::i64, {axes.size()}, axes.to_vector())};
auto new_node = node->clone_with_new_inputs(new_args);
auto node_cast = as_type_ptr<op::v1::ReduceSum>(new_node);
ASSERT_NE(node_cast, nullptr);

ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->input_values());
ASSERT_TRUE(axes == node_cast->get_reduction_axes());
ASSERT_TRUE(true == node_cast->get_keep_dims());
}

TEST(copy, reshape)
{
Shape shape_in{2, 3, 4};
@@ -370,24 +390,6 @@ TEST(copy, subtract)
ASSERT_TRUE(check_binary<op::Subtract>());
}

TEST(copy, sum)
{
Shape shape{4, 3};
AxisSet axes{1};
auto arg0 = make_shared<op::Parameter>(element::f32, shape);

auto node = make_shared<op::Sum>(arg0, axes);
OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
node->input_value(1).get_node_shared_ptr()};
auto new_node = node->clone_with_new_inputs(new_args);
auto node_cast = as_type_ptr<op::Sum>(new_node);
ASSERT_NE(node_cast, nullptr);

ASSERT_TRUE(nullptr != new_node);
ASSERT_TRUE(new_args == new_node->input_values());
ASSERT_TRUE(axes == node_cast->get_reduction_axes());
}

TEST(copy, tan)
{
ASSERT_TRUE(check_unary<op::Tan>());
@@ -623,6 +623,15 @@ namespace
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}

void op_is_ReduceSum()
{
op::v1::ReduceSum node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}

void op_is_Relu()
{
op::Relu node;
@@ -821,15 +830,6 @@ namespace
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}

void op_is_Sum()
{
op::Sum node;
EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
}

void op_is_Tan()
{
op::Tan node;
@@ -33,8 +33,6 @@
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/pass/graph_rewrite.hpp"
#include "ngraph/pass/manager.hpp"
@@ -63,9 +61,11 @@ static std::shared_ptr<pattern::op::Label> construct_variance_graph()
auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2});
auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 3});
auto input_sq = std::make_shared<op::Multiply>(input, input);
auto sum_input = std::make_shared<op::Sum>(input, AxisSet{0});
auto sum_input =
std::make_shared<op::v1::ReduceSum>(input, op::Constant::create(element::i64, {1}, {0}));
auto square_sumed_input = std::make_shared<op::Multiply>(sum_input, sum_input);
auto sum_squared_input = std::make_shared<op::Sum>(input_sq, AxisSet{0});
auto sum_squared_input =
std::make_shared<op::v1::ReduceSum>(input_sq, op::Constant::create(element::i64, {1}, {0}));
auto avg_input_sum_sq = std::make_shared<op::Divide>(square_sumed_input, N);
auto xmu = std::make_shared<op::Subtract>(sum_squared_input, avg_input_sum_sq);
auto variance = std::make_shared<op::Divide>(xmu, N);
@@ -80,7 +80,8 @@ static std::shared_ptr<pattern::op::Label> construct_mean_graph()
// construct mean;
auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 3});
auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2});
auto sum_input1 = std::make_shared<op::Sum>(input, AxisSet{0});
auto sum_input1 =
std::make_shared<op::v1::ReduceSum>(input, op::Constant::create(element::i64, {1}, {0}));
auto mean = std::make_shared<op::Divide>(sum_input1, N);
auto mean_label = std::make_shared<pattern::op::Label>(mean, nullptr, NodeVector{mean});
return mean_label;
@@ -488,7 +489,8 @@ TEST(pattern, mean)

auto input = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2});
auto sum_input1 = std::make_shared<op::Sum>(input, AxisSet{0});
auto sum_input1 =
std::make_shared<op::v1::ReduceSum>(input, op::Constant::create(element::i64, {1}, {0}));
auto mean = std::make_shared<op::Divide>(sum_input1, N);

auto mean_graph = construct_mean_graph();
@@ -503,9 +505,11 @@ TEST(pattern, variance)
auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2});
auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 3});
auto input_sq = std::make_shared<op::Multiply>(input, input);
auto sum_input = std::make_shared<op::Sum>(input, AxisSet{0});
auto sum_input =
std::make_shared<op::v1::ReduceSum>(input, op::Constant::create(element::i64, {1}, {0}));
auto square_sumed_input = std::make_shared<op::Multiply>(sum_input, sum_input);
auto sum_squared_input = std::make_shared<op::Sum>(input_sq, AxisSet{0});
auto sum_squared_input =
std::make_shared<op::v1::ReduceSum>(input_sq, op::Constant::create(element::i64, {1}, {0}));
auto avg_input_sum_sq = std::make_shared<op::Divide>(square_sumed_input, N);
auto xmu = std::make_shared<op::Subtract>(sum_squared_input, avg_input_sum_sq);
auto variance = std::make_shared<op::Divide>(xmu, N);
@@ -1390,7 +1390,6 @@ protected:
case OP_TYPEID::Softmax:
case OP_TYPEID::Split_v1:
case OP_TYPEID::Squeeze:
case OP_TYPEID::Sum:
case OP_TYPEID::Subtract:
case OP_TYPEID::Unsqueeze:
case OP_TYPEID::Xor:
@@ -123,7 +123,6 @@ NGRAPH_OP(Sqrt, ngraph::op)
NGRAPH_OP(SquaredDifference, ngraph::op)
NGRAPH_OP(Squeeze, ngraph::op)
NGRAPH_OP(Subtract, ngraph::op)
NGRAPH_OP(Sum, ngraph::op)
NGRAPH_OP(Tan, ngraph::op)
NGRAPH_OP(Tanh, ngraph::op)
NGRAPH_OP(TensorIterator, ngraph::op)
@@ -310,47 +310,6 @@ namespace opset0_downgrade
return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);
}

shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceMean> node)
{
// ReduceMean = Sum / Count
auto sum_node = op_cast_reduction_node<op::v0::Sum, op::v1::ReduceMean>(node);

// Count = Sum(Constant(1, shape=data.shape))
const auto data = node->input_value(0);
const auto axes = node->input_value(1);
const auto const_node =
op::v0::Constant::create(data.get_element_type(), data.get_shape(), {1});
std::shared_ptr<Node> count_node = std::make_shared<op::v0::Sum>(const_node, axes);

// Support keep_dims attribute
if (node->get_keep_dims())
{
// In order to keep the original dimensions we need to reshape the Count node
// before we use it in Divide with NUMPY broadcast
auto output_shape = count_node->get_shape();
auto reshaped_output_shape = output_shape;
for (const auto& axis : node->get_reduction_axes())
{
reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1);
}
auto shape_pattern = op::Constant::create(
element::u64, {reshaped_output_shape.size()}, reshaped_output_shape);
count_node = make_shared<op::v1::Reshape>(count_node->output(0), shape_pattern, false);
}

const auto replacement_node =
std::make_shared<op::v0::Divide>(sum_node, count_node, op::AutoBroadcastSpec::NUMPY);
replace_node(node, replacement_node);
return replacement_node;
}

shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceSum> node)
{
auto replacement_node = op_cast_reduction_node<op::v0::Sum, op::v1::ReduceSum>(node);
replace_node(node, replacement_node);
return replacement_node;
}

shared_ptr<Node> op_cast(shared_ptr<op::v1::Select> node)
{
ngraph::pass::ImplicitBroadcastElimination().run_on_node(node);
@@ -307,15 +307,6 @@ namespace opset1_upgrade
return op_cast_binary_elementwise_node<op::v0::Subtract, op::v1::Subtract>(node);
}

shared_ptr<Node> op_cast(shared_ptr<op::Sum> node)
{
bool keep_dims = false;
auto replacement_node =
make_shared<op::v1::ReduceSum>(node->input_value(0), node->input_value(1), keep_dims);
replace_node(node, replacement_node);
return replacement_node;
}

shared_ptr<Node> op_cast(shared_ptr<op::TopK> node)
{
NGRAPH_CHECK(op::is_constant(node->input_value(1).get_node()),
@@ -1,141 +0,0 @@
|
||||
//*****************************************************************************
|
||||
// Copyright 2017-2020 Intel Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//*****************************************************************************
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "util/type_prop.hpp"
|
||||
|
||||
NGRAPH_SUPPRESS_DEPRECATED_START
|
||||
|
||||
using namespace std;
|
||||
using namespace ngraph;
|
||||
|
||||
TEST(type_prop, sum_deduce)
|
||||
{
|
||||
auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
|
||||
|
||||
auto r0 = make_shared<op::Sum>(param_0, AxisSet{0});
|
||||
ASSERT_EQ(r0->get_element_type(), element::f32);
|
||||
ASSERT_EQ(r0->get_shape(), (Shape{4}));
|
||||
|
||||
auto r1 = make_shared<op::Sum>(param_0, AxisSet{1});
|
||||
ASSERT_EQ(r1->get_element_type(), element::f32);
|
||||
ASSERT_EQ(r1->get_shape(), (Shape{2}));
|
||||
|
||||
auto r01 = make_shared<op::Sum>(param_0, AxisSet{0, 1});
|
||||
ASSERT_EQ(r01->get_element_type(), element::f32);
|
||||
ASSERT_EQ(r01->get_shape(), (Shape{}));
|
||||
|
||||
auto r_none = make_shared<op::Sum>(param_0, AxisSet{});
|
||||
ASSERT_EQ(r_none->get_element_type(), element::f32);
|
||||
ASSERT_EQ(r_none->get_shape(), (Shape{2, 4}));
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_axis_oob)
|
||||
{
|
||||
auto param_0 = make_shared<op::Parameter>(element::f32, Shape{2, 4});
|
||||
|
||||
try
|
||||
{
|
||||
auto r = make_shared<op::Sum>(param_0, AxisSet{0, 2, 1});
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Did not detect out-of-bound axis for sum";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (2) is out of bounds"));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_dynamic_axes)
|
||||
{
|
||||
auto param = make_shared<op::Parameter>(element::f32, Shape{3, 4, 5});
|
||||
auto summation_axes = make_shared<op::Parameter>(element::i64, Shape{2});
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
|
||||
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
|
||||
EXPECT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_partial_rank_dynamic)
|
||||
{
|
||||
auto param = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
|
||||
auto summation_axes = AxisSet{2385, 0, 4404}; // arbitrary
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
|
||||
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
|
||||
EXPECT_TRUE(sum->get_output_partial_shape(0).is_dynamic());
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_partial_rank_static_dynamic_ok_result_static)
|
||||
{
|
||||
auto param =
|
||||
make_shared<op::Parameter>(element::f32, PartialShape{1, 2, Dimension::dynamic(), 4, 5});
|
||||
auto summation_axes = AxisSet{2, 3};
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
|
||||
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
|
||||
EXPECT_EQ(sum->get_shape(), (Shape{1, 2, 5}));
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_partial_rank_static_dynamic_ok_result_dynamic)
|
||||
{
|
||||
auto param = make_shared<op::Parameter>(
|
||||
element::f32, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()});
|
||||
auto summation_axes = AxisSet{2, 3};
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
|
||||
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
|
||||
EXPECT_TRUE(
|
||||
sum->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, Dimension::dynamic()}));
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_partial_rank_static_dynamic_axes_oob)
|
||||
{
|
||||
auto param = make_shared<op::Parameter>(
|
||||
element::f32, PartialShape{1, 2, Dimension::dynamic(), 4, Dimension::dynamic()});
|
||||
auto summation_axes = AxisSet{2, 5, 1};
|
||||
|
||||
try
|
||||
{
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
// Should have thrown, so fail if it didn't
|
||||
FAIL() << "Did not detect out-of-bound axis for sum (rank-static dynamic input)";
|
||||
}
|
||||
catch (const NodeValidationFailure& error)
|
||||
{
|
||||
EXPECT_HAS_SUBSTRING(error.what(), std::string("Reduction axis (5) is out of bounds"));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
FAIL() << "Deduced type check failed for unexpected reason";
|
||||
}
|
||||
}
|
||||
|
||||
TEST(type_prop, sum_partial_negative_axes)
|
||||
{
|
||||
auto param =
|
||||
make_shared<op::Parameter>(element::f32, PartialShape{1, 2, Dimension::dynamic(), 4, 5});
|
||||
auto summation_axes = op::Constant::create(element::i64, Shape{2}, {-3, -2});
|
||||
auto sum = make_shared<op::Sum>(param, summation_axes);
|
||||
|
||||
EXPECT_EQ(sum->get_output_element_type(0), element::f32);
|
||||
EXPECT_EQ(sum->get_shape(), (Shape{1, 2, 5}));
|
||||
}
|