Revise reference implementation for ReduceMax operation (#5792)

* Revise reference implementation for ReduceMax operation

* Refactor backend unit tests

 * Move tests with zero dims to op_eval
 * Remove test with double elem type

* Fix code style

* Added minor changes

 * Replace CoordinateTransform for CoordinateTransformBasic
 * Added constant expression to set keep_dims as false

* Add const qualifier to local variables

* Use host tensor to retrieve and normalize axes
This commit is contained in:
Gabriele Galiero Casay
2021-06-15 15:12:12 +02:00
committed by GitHub
parent db74707835
commit c3d1c2e420
9 changed files with 343 additions and 373 deletions

View File

@@ -24,7 +24,7 @@ namespace ngraph
auto temp_max = std::vector<T>(temp_elements, 0);
auto temp_sum = std::vector<T>(temp_elements, 0);
max(arg, temp_max.data(), shape, axes, true);
max(arg, temp_max.data(), shape, axes);
CoordinateTransform transform(shape);
CoordinateTransform temp_transform(temp_shape);

View File

@@ -6,6 +6,7 @@
#include <cmath>
#include <limits>
#include <numeric>
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/shape_util.hpp"
@@ -17,35 +18,35 @@ namespace ngraph
namespace reference
{
template <typename T>
void max(const T* arg,
T* out,
const Shape& in_shape,
const AxisSet& reduction_axes,
bool keep_dims)
void max(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes)
{
T minval = std::numeric_limits<T>::has_infinity
? T(-std::numeric_limits<T>::infinity())
: std::numeric_limits<T>::min();
auto out_shape = reduce(in_shape, reduction_axes, keep_dims);
CoordinateTransform output_transform(out_shape);
constexpr bool dont_keep_dims_in_output = false;
const auto out_shape = reduce(in_shape, reduction_axes, dont_keep_dims_in_output);
std::fill(out, out + shape_size(out_shape), minval);
for (const Coordinate& output_coord : output_transform)
{
out[output_transform.index(output_coord)] = minval;
}
CoordinateTransform input_transform(in_shape);
const auto in_strides = row_major_strides(in_shape);
const auto out_strides = row_major_strides(out_shape);
CoordinateTransformBasic input_transform(in_shape);
for (const Coordinate& input_coord : input_transform)
{
Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
const Coordinate output_coord =
reduce(input_coord, reduction_axes, dont_keep_dims_in_output);
T x = arg[input_transform.index(input_coord)];
T max = out[output_transform.index(output_coord)];
const size_t in_idx = std::inner_product(
input_coord.begin(), input_coord.end(), in_strides.begin(), 0);
const size_t out_idx = std::inner_product(
output_coord.begin(), output_coord.end(), out_strides.begin(), 0);
const T x = arg[in_idx];
const T max = out[out_idx];
if (x > max)
{
out[output_transform.index(output_coord)] = x;
out[out_idx] = x;
}
}
}

View File

@@ -23,7 +23,7 @@ namespace ngraph
auto temp_elements = shape_size(temp_shape);
auto temp_ptr = new T[temp_elements];
max(arg, temp_ptr, shape, axes, true);
max(arg, temp_ptr, shape, axes);
CoordinateTransform transform(shape);
CoordinateTransform temp_transform(temp_shape);

View File

@@ -3,8 +3,10 @@
//
#include "ngraph/op/max.hpp"
#include <ngraph/validation_util.hpp>
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/max.hpp"
#include "ngraph/shape_util.hpp"
@@ -22,7 +24,7 @@ namespace maxop
{
out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
runtime::reference::max(
arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes);
return true;
}
@@ -67,7 +69,13 @@ bool op::v1::ReduceMax::evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) const
{
NGRAPH_OP_SCOPE(v1_ReduceMax_evaluate);
return maxop::evaluate_max(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
const auto reduction_axes = get_normalized_axes_from_tensor(
inputs[1], inputs[0]->get_partial_shape().rank(), get_friendly_name());
return maxop::evaluate_max(inputs[0], outputs[0], reduction_axes, get_keep_dims());
}
bool op::v1::ReduceMax::has_evaluate() const

View File

@@ -71,6 +71,7 @@ set(SRC
op_eval/memory.cpp
op_eval/mish.cpp
op_eval/non_zero.cpp
op_eval/reduce_max.cpp
op_eval/reduce_prod.cpp
op_eval/reduce_sum.cpp
op_eval/roi_align.cpp

View File

@@ -104,123 +104,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_int32)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
// Create some tensors for input/output
std::vector<float> a{};
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>({a});
test_case.add_expected_output<float>(shape_rt,
{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero_int32)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
copy_data(result, vector<int32_t>({3, 3, 3}));
int32_t minval = std::numeric_limits<int32_t>::has_infinity
? -std::numeric_limits<int32_t>::infinity()
: std::numeric_limits<int32_t>::min();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{minval, minval, minval}), read_vector<int32_t>(result));
EXPECT_EQ((vector<int32_t>{}), read_vector<int32_t>(a));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_most_sig)
{
Shape shape_a{3, 3, 3};
@@ -319,55 +202,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_int32)
EXPECT_EQ((vector<int32_t>{14}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_double)
{
Shape shape_a{3, 3, 3};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape_a);
copy_data(a, vector<double>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
auto result = backend->create_tensor(element::f64, shape_rt);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<double>{14}), read_vector<double>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
float mi = -std::numeric_limits<float>::infinity();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{mi, mi, mi, mi, mi, mi}), read_vector<float>(result));
}
// ----------------------- keep dims = true ----------------------- //
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar)
@@ -473,127 +307,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_int32)
EXPECT_EQ((vector<int32_t>{2, 4, 6}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero_int32)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::i32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape_a);
copy_data(a, vector<int32_t>{});
auto result = backend->create_tensor(element::i32, shape_rt);
copy_data(result, vector<int32_t>({3, 3, 3}));
int32_t minval = std::numeric_limits<int32_t>::has_infinity
? -std::numeric_limits<int32_t>::infinity()
: std::numeric_limits<int32_t>::min();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<int32_t>{minval, minval, minval}), read_vector<int32_t>(result));
EXPECT_EQ((vector<int32_t>{}), read_vector<int32_t>(a));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_most_sig)
{
Shape shape_a{3, 3, 3};
@@ -711,55 +424,6 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_int32)
EXPECT_EQ((vector<int32_t>{14}), read_vector<int32_t>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_double)
{
Shape shape_a{3, 3, 3};
auto A = make_shared<op::Parameter>(element::f64, shape_a);
Shape shape_rt{1, 1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f64, shape_a);
copy_data(a, vector<double>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
auto result = backend->create_tensor(element::f64, shape_rt);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<double>{14}), read_vector<double>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
float mi = -std::numeric_limits<float>::infinity();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{mi, mi, mi, mi, mi, mi}), read_vector<float>(result));
}
// Dynamic
NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic)

View File

@@ -0,0 +1,312 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/test_control.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
using namespace std;
using namespace ngraph;
static string s_manifest = "${MANIFEST}";
// ReduceMax of a 3x0 f32 matrix along axis 1 (keep_dims=false): reducing an
// empty axis must yield the max-reduction identity, -infinity, per output row.
TEST(op_eval, reduce_max_matrix_rows_zero)
{
    const Shape input_shape{3, 0};
    const Shape output_shape{3};
    const auto param = make_shared<op::Parameter>(element::f32, input_shape);
    const auto reduction_axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
    const auto function = make_shared<Function>(
        make_shared<op::v1::ReduceMax>(param, reduction_axes, false), ParameterVector{param});

    auto backend = runtime::Backend::create("INTERPRETER");
    auto input_tensor = backend->create_tensor(element::f32, input_shape);
    copy_data(input_tensor, vector<float>{});
    auto result_tensor = backend->create_tensor(element::f32, output_shape);
    // Pre-fill the result so a pass cannot come from stale memory.
    copy_data(result_tensor, vector<float>({3, 3, 3}));

    auto executable = backend->compile(function);
    executable->call_with_validate({result_tensor}, {input_tensor});

    const float neg_inf = -std::numeric_limits<float>::infinity();
    EXPECT_EQ((vector<float>{neg_inf, neg_inf, neg_inf}), read_vector<float>(result_tensor));
}
// ReduceMax of a 3x0 int32 matrix along axis 1 (keep_dims=false): reducing an
// empty axis must yield the integer max-reduction identity per output row.
TEST(op_eval, reduce_max_matrix_rows_zero_int32)
{
    Shape shape_a{3, 0};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_rt{3};
    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
    auto f =
        make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
    auto backend = runtime::Backend::create("INTERPRETER");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{});
    auto result = backend->create_tensor(element::i32, shape_rt);
    // Pre-fill the result so a pass cannot come from stale memory.
    copy_data(result, vector<int32_t>({3, 3, 3}));
    // int32 has no infinity, so the reduction identity is numeric_limits::min().
    // (The former `has_infinity ? -infinity() : min()` ternary was dead code:
    // has_infinity is false for integers, and infinity() would return 0.)
    const int32_t minval = std::numeric_limits<int32_t>::min();
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int32_t>{minval, minval, minval}), read_vector<int32_t>(result));
    EXPECT_EQ((vector<int32_t>{}), read_vector<int32_t>(a));
}
// ReduceMax of a 0x2 f32 matrix along axis 0 (keep_dims=false): reducing an
// empty axis must yield -infinity for each of the two output columns.
TEST(op_eval, reduce_max_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
// ReduceMax of an empty f32 vector to a scalar (keep_dims=false): the result
// is the max-reduction identity, -infinity.
TEST(op_eval, reduce_max_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
// ReduceMax of a 0x0 f32 matrix over both axes (keep_dims=false): the scalar
// result is the max-reduction identity, -infinity.
TEST(op_eval, reduce_max_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
// ReduceMax of a 3x0x2 f32 tensor along the empty middle axis
// (keep_dims=false): all six outputs must be -infinity.
TEST(op_eval, reduce_max_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, false), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
float mi = -std::numeric_limits<float>::infinity();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{mi, mi, mi, mi, mi, mi}), read_vector<float>(result));
}
// ReduceMax of a 3x0 f32 matrix along axis 1 with keep_dims=true: the output
// keeps rank 2 (shape {3,1}) and each element is -infinity.
TEST(op_eval, reduce_max_keep_matrix_rows_zero)
{
Shape shape_a{3, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3, 3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
// ReduceMax of a 3x0 int32 matrix along axis 1 with keep_dims=true: the output
// keeps rank 2 (shape {3,1}) and each element is the integer reduction identity.
TEST(op_eval, reduce_max_keep_matrix_rows_zero_int32)
{
    Shape shape_a{3, 0};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    Shape shape_rt{3, 1};
    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
    auto f =
        make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
    auto backend = runtime::Backend::create("INTERPRETER");
    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape_a);
    copy_data(a, vector<int32_t>{});
    auto result = backend->create_tensor(element::i32, shape_rt);
    // Pre-fill the result so a pass cannot come from stale memory.
    copy_data(result, vector<int32_t>({3, 3, 3}));
    // int32 has no infinity, so the reduction identity is numeric_limits::min().
    // (The former `has_infinity ? -infinity() : min()` ternary was dead code:
    // has_infinity is false for integers, and infinity() would return 0.)
    const int32_t minval = std::numeric_limits<int32_t>::min();
    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int32_t>{minval, minval, minval}), read_vector<int32_t>(result));
    EXPECT_EQ((vector<int32_t>{}), read_vector<int32_t>(a));
}
// ReduceMax of a 0x2 f32 matrix along axis 0 with keep_dims=true: the output
// keeps rank 2 (shape {1,2}) and both elements are -infinity.
TEST(op_eval, reduce_max_keep_matrix_cols_zero)
{
// Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
Shape shape_a{0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3, 3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()}),
read_vector<float>(result));
}
// ReduceMax of an empty f32 vector with keep_dims=true: the output keeps
// rank 1 (shape {1}) and holds -infinity.
TEST(op_eval, reduce_max_keep_vector_zero)
{
Shape shape_a{0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
// ReduceMax of a 0x0 f32 matrix over both axes with keep_dims=true: the
// output keeps rank 2 (shape {1,1}) and holds -infinity.
TEST(op_eval, reduce_max_keep_matrix_to_scalar_zero_by_zero)
{
Shape shape_a{0, 0};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{1, 1};
auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Pre-fill the result so a pass cannot come from stale memory.
copy_data(result, vector<float>({3}));
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
}
// ReduceMax of a 3x0x2 f32 tensor along the empty middle axis with
// keep_dims=true: output shape {3,1,2}, all six elements -infinity.
TEST(op_eval, reduce_max_keep_3d_eliminate_zero_dim)
{
Shape shape_a{3, 0, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_rt{3, 1, 2};
auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
auto f =
make_shared<Function>(make_shared<op::v1::ReduceMax>(A, axes, true), ParameterVector{A});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, vector<float>{});
auto result = backend->create_tensor(element::f32, shape_rt);
// Overwrite the initial result vector to make sure we're not just coincidentally getting the
// right value.
copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
float mi = -std::numeric_limits<float>::infinity();
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<float>{mi, mi, mi, mi, mi, mi}), read_vector<float>(result));
}

View File

@@ -474,21 +474,7 @@ reduce_mean_matrix_rows_int32
reduce_mean_keep_to_scalar_int8
reduce_mean_keep_matrix_rows_int32
reduce_max_to_scalar_int8
reduce_max_matrix_rows_zero
reduce_max_matrix_rows_zero_int32
reduce_max_matrix_cols_zero
reduce_max_vector_zero
reduce_max_matrix_to_scalar_zero_by_zero
reduce_max_3d_to_scalar_double
reduce_max_3d_eliminate_zero_dim
reduce_max_keep_to_scalar_int8
reduce_max_keep_matrix_rows_zero
reduce_max_keep_matrix_rows_zero_int32
reduce_max_keep_matrix_cols_zero
reduce_max_keep_vector_zero
reduce_max_keep_matrix_to_scalar_zero_by_zero
reduce_max_keep_3d_to_scalar_double
reduce_max_keep_3d_eliminate_zero_dim
# Incorrect precision f64!
sum_trivial_in_double

View File

@@ -41,9 +41,7 @@ INTERPRETER.reduce_min_keep_to_scalar_int8
INTERPRETER.reduce_mean_to_scalar_int8
INTERPRETER.reduce_mean_keep_to_scalar_int8
INTERPRETER.reduce_max_to_scalar_int8
INTERPRETER.reduce_max_3d_to_scalar_double
INTERPRETER.reduce_max_keep_to_scalar_int8
INTERPRETER.reduce_max_keep_3d_to_scalar_double
INTERPRETER.product_to_scalar_int8
INTERPRETER.max_pool_uint8
INTERPRETER.max_pool_int8