From 1337997134d7e1af6cd89a6d8f442c22f1dfbc21 Mon Sep 17 00:00:00 2001 From: Piotr Szmelczynski Date: Wed, 16 Sep 2020 15:33:02 +0200 Subject: [PATCH] Test refactor (#2225) --- ngraph/test/backend/max.in.cpp | 404 +++++----------- ngraph/test/backend/min.in.cpp | 354 ++++---------- ngraph/test/backend/minimum.in.cpp | 61 +-- .../test/backend/quantize_dequantize.in.cpp | 454 +++++++----------- 4 files changed, 433 insertions(+), 840 deletions(-) diff --git a/ngraph/test/backend/max.in.cpp b/ngraph/test/backend/max.in.cpp index 81730e26ec5..ce09f3c94b8 100644 --- a/ngraph/test/backend/max.in.cpp +++ b/ngraph/test/backend/max.in.cpp @@ -16,14 +16,9 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/known_element_types.hpp" -#include "util/ndarray.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" #include "util/test_control.hpp" -#include "util/test_tools.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -31,6 +26,7 @@ using namespace std; using namespace ngraph; static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); // Trivial case with no reduced axes. NGRAPH_TEST(${BACKEND_NAME}, max_trivial) @@ -39,17 +35,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_trivial) auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 2, 3, 4}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, max_trivial_int8) @@ -58,16 +49,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_trivial_int8) auto A = make_shared(element::i8, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 2, 3, 4}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 2, 3, 4}); + test_case.run(); } // Failure has been reported at 5D for some reason @@ -77,20 +64,14 @@ NGRAPH_TEST(${BACKEND_NAME}, max_trivial_5d) auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, max_trivial_5d_int32) @@ -99,19 +80,14 @@ NGRAPH_TEST(${BACKEND_NAME}, max_trivial_5d_int32) auto A = make_shared(element::i32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); - copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); - auto result = backend->create_tensor(element::i32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar) @@ -120,21 +96,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar) auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{4}), read_vector(result))); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input(a); + test_case.add_expected_output(Shape{}, {4}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar_int8) @@ -143,16 +110,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar_int8) auto A = make_shared(element::i8, shape); auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{4}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {4}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_columns) @@ -162,21 +125,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_columns) Shape shape_rt{2}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{5, 6}), read_vector(result))); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4, 5, 6}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {5, 6}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows) @@ -186,21 +140,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{2, 4, 6}), read_vector(result))); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4, 5, 6}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {2, 4, 6}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_int32) @@ -210,20 +155,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_int32) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{2, 4, 6}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_EQ((vector{1, 2, 3, 4, 5, 6}), read_vector(a)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {2, 4, 6}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero) @@ -233,25 +170,15 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{-std::numeric_limits::infinity(), - -std::numeric_limits::infinity(), - -std::numeric_limits::infinity()}), - read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, + {-std::numeric_limits::infinity(), + -std::numeric_limits::infinity(), + -std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero_int32) @@ -261,22 +188,16 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero_int32) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); - copy_data(result, vector({3, 3, 3})); + std::vector a{}; int32_t minval = std::numeric_limits::has_infinity ? 
-std::numeric_limits::infinity() : std::numeric_limits::min(); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{minval, minval, minval}), read_vector(result)); - EXPECT_EQ((vector{}), read_vector(a)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {minval, minval, minval}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_cols_zero) @@ -287,24 +208,14 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_cols_zero) Shape shape_rt{2}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{-std::numeric_limits::infinity(), - -std::numeric_limits::infinity()}), - read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output( + shape_rt, + {-std::numeric_limits::infinity(), -std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_vector_zero) @@ -314,22 +225,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_vector_zero) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{-std::numeric_limits::infinity()}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {-std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_matrix_to_scalar_zero_by_zero) @@ -339,22 +240,12 @@ NGRAPH_TEST(${BACKEND_NAME}, max_matrix_to_scalar_zero_by_zero) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{-std::numeric_limits::infinity()}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {-std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_most_sig) @@ -364,19 +255,13 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_most_sig) Shape shape_rt{3, 3}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{19, 20, 21, 22, 23, 24, 25, 26, 27}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {19, 20, 21, 22, 23, 24, 25, 26, 27}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_least_sig) @@ -386,19 +271,13 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_least_sig) Shape shape_rt{3, 3}; auto f = make_shared(make_shared(A, AxisSet{2}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{3, 6, 9, 12, 15, 18, 21, 24, 27}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {3, 6, 9, 12, 15, 18, 21, 24, 27}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_vector) @@ -407,20 +286,13 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_vector) auto A = make_shared(element::f32, shape_a); Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{25.0f, 26.0f, 27.0f}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {25.0f, 26.0f, 27.0f}); + 
test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar)
@@ -430,18 +302,13 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar)
     Shape shape_rt{};
     auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+    std::vector<float> a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
 
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                               13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{14.0f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>({a});
+    test_case.add_expected_output<float>(shape_rt, {14.0f});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_int32)
@@ -451,17 +318,13 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_int32)
     Shape shape_rt{};
     auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+    std::vector<int32_t> a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+                           13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
 
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                                 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::i32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<int32_t>{14}), read_vector<int32_t>(result));
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<int32_t>({a});
+    test_case.add_expected_output<int32_t>(shape_rt, {14});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_double)
@@ -471,17 +334,14 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_double)
     Shape shape_rt{};
     auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+    std::vector<double> a{1, 2, 3, 4, 5, 6, 7,
+                          8, 9, 10, 11, 12, 13, 14,
+                          13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
 
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f64, shape_a);
-    copy_data(a, vector<double>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                                13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f64, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<double>{14}), read_vector<double>(result)));
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<double>({a});
+    test_case.add_expected_output<double>(shape_rt, {14});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, max_3d_eliminate_zero_dim)
@@ -491,20 +351,16 @@ NGRAPH_TEST(${BACKEND_NAME}, max_3d_eliminate_zero_dim)
     Shape shape_rt{3, 2};
    auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+    std::vector<float> a{};
 
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
-    
// right value. - copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); - - float mi = -std::numeric_limits::infinity(); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{mi, mi, mi, mi, mi, mi}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, + {-std::numeric_limits::infinity(), + -std::numeric_limits::infinity(), + -std::numeric_limits::infinity(), + -std::numeric_limits::infinity(), + -std::numeric_limits::infinity(), + -std::numeric_limits::infinity()}); + test_case.run(); } diff --git a/ngraph/test/backend/min.in.cpp b/ngraph/test/backend/min.in.cpp index 6080e6c890f..a6cc47e63e7 100644 --- a/ngraph/test/backend/min.in.cpp +++ b/ngraph/test/backend/min.in.cpp @@ -16,14 +16,9 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/known_element_types.hpp" -#include "util/ndarray.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" #include "util/test_control.hpp" -#include "util/test_tools.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -31,6 +26,7 @@ using namespace std; using namespace ngraph; static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); // Trivial case with no reduced axes. NGRAPH_TEST(${BACKEND_NAME}, min_trivial) @@ -39,17 +35,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_trivial) auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 2, 3, 4}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } // Failure has been reported at 5D for some reason @@ -58,21 +49,14 @@ NGRAPH_TEST(${BACKEND_NAME}, min_trivial_5d) Shape shape{2, 2, 2, 2, 2}; auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); + std::vector a{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + 
test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_trivial_5d_int32) @@ -81,19 +65,14 @@ NGRAPH_TEST(${BACKEND_NAME}, min_trivial_5d_int32) auto A = make_shared(element::i32, shape); auto f = make_shared(make_shared(A, AxisSet{}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); - copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); - auto result = backend->create_tensor(element::i32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_to_scalar) @@ -102,22 +81,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_to_scalar) auto A = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(Shape{}, {1}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_to_scalar_int8) @@ -126,20 +95,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_to_scalar_int8) auto A = make_shared(element::i8, shape); auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); - copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_EQ((vector{1, 2, 3, 4}), read_vector(a)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(Shape{}, {1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_columns) @@ -149,22 +110,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_columns) Shape shape_rt{2}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 2}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4, 5, 6}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 2}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows) @@ -174,22 +125,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 3, 5}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3, 4, 5, 6}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 3, 5}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows_int32) @@ -199,20 +140,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows_int32) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1, 3, 5}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_EQ((vector{1, 2, 3, 4, 5, 6}), read_vector(a)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 3, 5}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows_zero) @@ -222,25 +155,15 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_rows_zero) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{std::numeric_limits::infinity(), - std::numeric_limits::infinity(), - std::numeric_limits::infinity()}), - read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, + {std::numeric_limits::infinity(), + std::numeric_limits::infinity(), + std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_cols_zero) @@ -251,24 +174,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_cols_zero) Shape shape_rt{2}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3, 3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{std::numeric_limits::infinity(), - std::numeric_limits::infinity()}), - read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output( + shape_rt, {std::numeric_limits::infinity(), std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_vector_zero) @@ -278,22 +190,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_vector_zero) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{std::numeric_limits::infinity()}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. 
- EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_matrix_to_scalar_zero_by_zero) @@ -303,22 +205,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_matrix_to_scalar_zero_by_zero) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - copy_data(result, vector({3})); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{std::numeric_limits::infinity()}), read_vector(result)); - - // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the - // input tensors, so let's do this too. - EXPECT_TRUE( - test::all_close_f((vector{}), read_vector(a), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {std::numeric_limits::infinity()}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_matrix_most_sig) @@ -328,19 +220,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_matrix_most_sig) Shape shape_rt{3, 3}; auto f = make_shared(make_shared(A, AxisSet{0}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 2, 3, 4, 5, 6, 7, 8, 9}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 2, 3, 4, 5, 6, 7, 8, 9}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_matrix_least_sig) @@ -350,19 +236,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_matrix_least_sig) Shape shape_rt{3, 3}; auto f = make_shared(make_shared(A, AxisSet{2}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f((vector{1, 4, 7, 10, 13, 16, 19, 22, 25}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 4, 7, 10, 13, 16, 19, 22, 25}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } 
NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_vector) @@ -372,18 +252,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_vector) Shape shape_rt{3}; auto f = make_shared(make_shared(A, AxisSet{0, 1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1, 2, 3}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1, 2, 3}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_scalar) @@ -393,18 +268,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_scalar) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0, 1, 2}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_TRUE(test::all_close_f( - (vector{1}), read_vector(result), MIN_FLOAT_TOLERANCE_BITS)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1}); + test_case.run(MIN_FLOAT_TOLERANCE_BITS); } NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_scalar_int32) @@ -414,17 +284,13 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_to_scalar_int32) Shape shape_rt{}; auto f = make_shared(make_shared(A, AxisSet{0, 1, 2}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); - copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{1}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, min_3d_eliminate_zero_dim) @@ -434,20 +300,12 @@ NGRAPH_TEST(${BACKEND_NAME}, min_3d_eliminate_zero_dim) Shape shape_rt{3, 2}; auto f = make_shared(make_shared(A, AxisSet{1}), ParameterVector{A}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); - copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); - - // Overwrite the initial result vector to make sure we're not just coincidentally getting the - // 
right value. - copy_data(result, vector{2112, 2112, 2112, 2112, 2112, 2112}); + std::vector a{}; float inf = std::numeric_limits::infinity(); - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a}); - EXPECT_EQ((vector{inf, inf, inf, inf, inf, inf}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_input({a}); + test_case.add_expected_output(shape_rt, {inf, inf, inf, inf, inf, inf}); + test_case.run(); } diff --git a/ngraph/test/backend/minimum.in.cpp b/ngraph/test/backend/minimum.in.cpp index 926babd5b32..fcd18dc6b57 100644 --- a/ngraph/test/backend/minimum.in.cpp +++ b/ngraph/test/backend/minimum.in.cpp @@ -32,14 +32,10 @@ // clang-format on #include "gtest/gtest.h" -#include "runtime/backend.hpp" -#include "ngraph/runtime/tensor.hpp" #include "ngraph/ngraph.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" #include "util/test_control.hpp" -#include "util/test_tools.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -47,6 +43,7 @@ using namespace std; using namespace ngraph; static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, minimum) { @@ -55,19 +52,13 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum) auto B = make_shared(element::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 8, -8, 17, -0.5, 0.5, 2, 1}; + std::vector b{1, 2, 4, 8, 0, 0, 1, 1.5}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); - copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b}); - EXPECT_TRUE( - test::all_close_f((vector{1, 2, -8, 8, -.5, 0, 1, 1}), read_vector(result))); + auto test_case = test::TestCase(f); + test_case.add_multiple_inputs({a, b}); + test_case.add_expected_output(shape, {1, 2, -8, 8, -.5, 0, 1, 1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) @@ -77,18 +68,13 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) auto B = make_shared(element::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 8, -8, 17, -5, 67635216, 2, 1}; + std::vector b{1, 2, 4, 8, 0, 18448, 1, 6}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); - copy_data(a, vector{1, 8, -8, 17, -5, 67635216, 2, 1}); - auto b = backend->create_tensor(element::i32, shape); - copy_data(b, vector{1, 2, 4, 8, 0, 18448, 1, 6}); - auto result = backend->create_tensor(element::i32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b}); - EXPECT_EQ((vector{1, 2, -8, 8, -5, 18448, 1, 1}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_multiple_inputs({a, b}); + test_case.add_expected_output(shape, {1, 2, -8, 8, -5, 18448, 1, 1}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, minimum_int64) @@ -98,16 +84,11 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum_int64) auto B = make_shared(element::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); + std::vector a{1, 8, -8, 17, -5, 67635216, 2, 17179887632}; + std::vector b{1, 2, 4, 8, 0, 18448, 1, 280592}; - // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape); - copy_data(a, vector{1, 8, -8, 17, -5, 67635216, 2, 17179887632}); - auto b = backend->create_tensor(element::i64, shape); - copy_data(b, vector{1, 2, 4, 8, 0, 18448, 1, 280592}); - auto result = backend->create_tensor(element::i64, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b}); - EXPECT_EQ((vector{1, 2, -8, 8, -5, 18448, 1, 280592}), read_vector(result)); + auto test_case = test::TestCase(f); + test_case.add_multiple_inputs({a, b}); + test_case.add_expected_output(shape, {1, 2, -8, 8, -5, 18448, 1, 280592}); + test_case.run(); } diff --git a/ngraph/test/backend/quantize_dequantize.in.cpp b/ngraph/test/backend/quantize_dequantize.in.cpp index 53bdf76e186..c90143f0703 100644 --- a/ngraph/test/backend/quantize_dequantize.in.cpp +++ b/ngraph/test/backend/quantize_dequantize.in.cpp @@ -16,14 +16,9 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/known_element_types.hpp" -#include "util/ndarray.hpp" +#include "util/engine/test_engines.hpp" +#include "util/test_case.hpp" #include "util/test_control.hpp" -#include "util/test_tools.hpp" NGRAPH_SUPPRESS_DEPRECATED_START @@ -31,6 +26,7 @@ using namespace std; using namespace ngraph; static string s_manifest = "${MANIFEST}"; +using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, quantize) { @@ -53,20 +49,16 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize) make_shared(X, scale, offset, output_type, quantization_axes, round_mode); auto f = make_shared(quantize, ParameterVector{X}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto x = backend->create_tensor(input_type, input_shape); - auto y = backend->create_tensor(output_type, input_shape); - - copy_data(x, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); + std::vector x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2 // equals (rounded) 0 0 1 2 2 2 3 4 4 4 5 6 // plus offset 1 1 1 1 1 1 1 1 1 1 1 1 // equals 1 1 2 3 3 3 4 5 5 5 6 7 - auto handle = backend->compile(f); - handle->call_with_validate({y}, {x}); - EXPECT_EQ((vector{1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}), - read_vector(y)); + auto test_case = test::TestCase(f); + test_case.add_input({x}); + test_case.add_expected_output(input_shape, {1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}); + test_case.run(); } NGRAPH_TEST(${BACKEND_NAME}, dequantize) @@ -87,21 +79,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize) auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes); auto f = make_shared(dequantize, ParameterVector{X}); - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto x = backend->create_tensor(input_type, input_shape); - auto y = backend->create_tensor(output_type, input_shape); - - copy_data(x, vector{1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}); + std::vector x{{1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}}; // minus offset 1 1 1 1 1 1 1 1 1 1 1 1 // eqauls 0 0 1 2 2 2 3 4 4 4 5 6 // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2 // equals 0 0 2 4 4 4 6 8 8 8 10 12 - auto handle = backend->compile(f); - handle->call_with_validate({y}, {x}); - EXPECT_TRUE(test::all_close_f((vector{0, 0, 2, 4, 4, 4, 6, 8, 
8, 8, 10, 12}),
-                                  read_vector(y),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, 4, 4, 4, 6, 8, 8, 8, 10, 12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset)
@@ -125,20 +113,16 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
+    std::vector x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 2 2 2 3 4 4 4 5 6
     // plus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 2 2 2 3 4 4 4 5 6
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape, {0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_zero_offset)
@@ -159,21 +143,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_zero_offset)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6});
+    std::vector x{0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6};
     // minus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 2 2 2 3 4 4 4 5 6
     // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals 0 0 2 4 4 4 6 8 8 8 10 12
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(test::all_close_f((vector{0, 0, 2, 4, 4, 4, 6, 8, 8, 8, 10, 12}),
-                                  read_vector(y),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, 4, 4, 4, 6, 8, 8, 8, 10, 12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_axes)
@@ -197,20 +177,17 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_axes)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
+    std::vector x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
     // divided by scale 2 2 2 3 3 3 4 4 4 5 5 5
     // equals (rounded) 0 1 1 1 1 2 2 2 2 2 2 2
     // plus offset 10 10 10 20 20 20 30 30 30 40 40 40
    // equals 10 11 11 21 21 22 32 32 32 42 42 42
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{10, 11, 11, 21, 21, 22, 32, 32, 32, 42, 42, 42}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {10, 11, 11, 21, 21, 22, 32, 32, 32, 42, 42, 42});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_axes)
@@ -231,21 +208,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_axes)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{10, 11, 11, 21, 21, 22, 32, 32, 32, 42, 42, 42});
+    std::vector x{10, 11, 11, 21, 21, 22, 32, 32, 32, 42, 42, 42};
     // minus offset 10 10 10 20 20 20 30 30 30 40 40 40
     // equals 0 1 1 1 1 2 2 2 2 2 2 2
     // multiplied by scale 2 2 2 3 3 3 4 4 4 5 5 5
     // equals 0 2 2 3 3 6 8 8 8 10 10 10
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(test::all_close_f((vector{0, 2, 2, 3, 3, 6, 8, 8, 8, 10, 10, 10}),
-                                  read_vector(y),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 2, 2, 3, 3, 6, 8, 8, 8, 10, 10, 10});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int8)
@@ -269,20 +242,17 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // plus offset 1 1 1 1 1 1 1 1 1 1 1 1
     // equals 1 1 2 -1 3 -1 4 -3 5 -3 6 -5
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8)
@@ -303,22 +273,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5});
+    std::vector x{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5};
     // minus offset 1 1 1 1 1 1 1 1 1 1 1 1
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals 0 0 2 -4 4 -4 6 -8 8 -8 10 -12
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(
-        test::all_close_f((vector{0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12}),
-                          read_vector(y),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset)
@@ -342,20 +307,17 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // plus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8_zero_offset)
@@ -376,22 +338,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8_zero_offset)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6});
+    std::vector x{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6};
    // minus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals 0 0 2 -4 4 -4 6 -8 8 -8 10 -12
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(
-        test::all_close_f((vector{0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12}),
-                          read_vector(y),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int32)
@@ -415,20 +372,17 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // plus offset 1 1 1 1 1 1 1 1 1 1 1 1
     // equals 1 1 2 -1 3 -1 4 -3 5 -3 6 -5
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32)
@@ -449,22 +403,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5});
+    std::vector x{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5};
     // minus offset 1 1 1 1 1 1 1 1 1 1 1 1
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals 0 0 2 -4 4 -4 6 -8 8 -8 10 -12
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(
-        test::all_close_f((vector{0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12}),
-                          read_vector(y),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset)
@@ -488,20 +437,17 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // plus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32_zero_offset)
@@ -522,22 +468,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32_zero_offset)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6});
+    std::vector x{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6};
     // minus offset 0 0 0 0 0 0 0 0 0 0 0 0
     // equals 0 0 1 -2 2 -2 3 -4 4 -4 5 -6
     // multiplied by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals 0 0 2 -4 4 -4 6 -8 8 -8 10 -12
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_TRUE(
-        test::all_close_f((vector{0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12}),
-                          read_vector(y),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_uint8)
@@ -563,16 +504,13 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_uint8)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
+    std::vector x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
 
-    copy_data(x, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, max, max, max, max, max, max, max, max, max, max, max}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(
+        input_shape, {0, max, max, max, max, max, max, max, max, max, max, max});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int8)
@@ -599,16 +537,13 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int8)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
 
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, min, max, min, max, min, max, min, max, min, max, min}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(
+        input_shape, {0, min, max, min, max, min, max, min, max, min, max, min});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int32)
@@ -636,16 +571,13 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int32)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
+    std::vector x{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11};
 
-    copy_data(x, vector{0, -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{0, min, max, min, max, min, max, min, max, min, max, min}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(
+        input_shape, {0, min, max, min, max, min, max, min, max, min, max, min});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_ZERO)
@@ -669,18 +601,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_ZERO)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 2 3 -2 -2 -3 3 3 4 -3 -3 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 2, 3, -2, -2, -3, 3, 3, 4, -3, -3, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 2, 3, -2, -2, -3, 3, 3, 4, -3, -3, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_INFINITY)
@@ -704,18 +633,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_INFINITY)
         make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 3 3 -2 -3 -3 3 4 4 -3 -4 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 3, 3, -2, -3, -3, 3, 4, 4, -3, -4, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 3, 3, -2, -3, -3, 3, 4, 4, -3, -4, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_UPWARD)
@@ -739,18 +665,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_UPWARD)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 3 3 -2 -2 -3 3 4 4 -3 -3 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 3, 3, -2, -2, -3, 3, 4, 4, -3, -3, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 3, 3, -2, -2, -3, 3, 4, 4, -3, -3, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_DOWNWARD)
@@ -774,18 +697,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_DOWNWARD)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 2 3 -2 -3 -3 3 3 4 -3 -4 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 2, 3, -2, -3, -3, 3, 3, 4, -3, -4, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 2, 3, -2, -3, -3, 3, 3, 4, -3, -4, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_EVEN)
@@ -809,18 +729,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_EVEN)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 2 3 -2 -2 -3 3 4 4 -3 -4 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 2, 3, -2, -2, -3, 3, 4, 4, -3, -4, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 2, 3, -2, -2, -3, 3, 4, 4, -3, -4, -4});
+    test_case.run();
}
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_INFINITY)
@@ -849,18 +766,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_INFINITY)
        static_cast(static_cast(round_mode)));
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 3 3 3 -3 -3 -3 4 4 4 -4 -4 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{3, 3, 3, -3, -3, -3, 4, 4, 4, -4, -4, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {3, 3, 3, -3, -3, -3, 4, 4, 4, -4, -4, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_ZERO)
@@ -889,18 +803,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_ZERO)
        static_cast(static_cast(round_mode)));
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 2 2 -2 -2 -2 3 3 3 -3 -3 -3
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 2, 2, -2, -2, -2, 3, 3, 3, -3, -3, -3}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 2, 2, -2, -2, -2, 3, 3, 3, -3, -3, -3});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_UP)
@@ -924,18 +835,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_UP)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 3 3 3 -2 -2 -2 4 4 4 -3 -3 -3
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{3, 3, 3, -2, -2, -2, 4, 4, 4, -3, -3, -3}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {3, 3, 3, -2, -2, -2, 4, 4, 4, -3, -3, -3});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_DOWN)
@@ -959,18 +867,15 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_DOWN)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-
-    copy_data(x, vector{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15});
+    std::vector x{9, 10, 11, -9, -10, -11, 13, 14, 15, -13, -14, -15};
     // divide by scale 4 4 4 4 4 4 4 4 4 4 4 4
     // equals (rounded) 2 2 2 -3 -3 -3 3 3 3 -4 -4 -4
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x});
-    EXPECT_EQ((vector{2, 2, 2, -3, -3, -3, 3, 3, 3, -4, -4, -4}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_expected_output(input_shape,
+                                  {2, 2, 2, -3, -3, -3, 3, 3, 3, -4, -4, -4});
+    test_case.run();
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, dequantize_dynamic_offset)
@@ -991,21 +896,17 @@ NGRAPH_TEST(${BACKEND_NAME}, dequantize_dynamic_offset)
     auto dequantize = make_shared(X, scale, offset, output_type, quantization_axes);
     auto f = make_shared(dequantize, ParameterVector{X, scale, offset});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-    auto Scale = backend->create_tensor(output_type, scale_offset_shape);
-    auto Offset = backend->create_tensor(input_type, scale_offset_shape);
+    std::vector x{0, 3, 128, 255};
+    std::vector Scale{2};
+    std::vector Offset{128};
 
-    copy_data(x, vector{0, 3, 128, 255});
-    copy_data(Scale, vector{2});
-    copy_data(Offset, vector{128});
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_input({Scale});
+    test_case.add_input({Offset});
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x, Scale, Offset});
-    EXPECT_TRUE(test::all_close_f((vector{-256.0f, -250.0f, 0.0f, 254.0f}),
-                                  read_vector(y),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    test_case.add_expected_output(input_shape, {-256.0f, -250.0f, 0.0f, 254.0f});
+    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, quantize_dynamic_offset)
@@ -1029,22 +930,19 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_dynamic_offset)
        make_shared(X, scale, offset, output_type, quantization_axes, round_mode);
     auto f = make_shared(quantize, ParameterVector{X, scale, offset});
 
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    auto x = backend->create_tensor(input_type, input_shape);
-    auto y = backend->create_tensor(output_type, input_shape);
-    auto Scale = backend->create_tensor(input_type, scale_offset_shape);
-    auto Offset = backend->create_tensor(output_type, scale_offset_shape);
-
-    copy_data(x, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
+    std::vector x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
     // divide by scale 2 2 2 2 2 2 2 2 2 2 2 2
     // equals (rounded) 0 0 1 2 2 2 3 4 4 4 5 6
     // plus offset 1 1 1 1 1 1 1 1 1 1 1 1
     // equals 1 1 2 3 3 3 4 5 5 5 6 7
-    copy_data(Scale, vector{2});
-    copy_data(Offset, vector{1});
+    std::vector Scale{2};
+    std::vector Offset{1};
 
-    auto handle = backend->compile(f);
-    handle->call_with_validate({y}, {x, Scale, Offset});
-    EXPECT_EQ((vector{1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}),
-              read_vector(y));
+    auto test_case = test::TestCase(f);
+    test_case.add_input({x});
+    test_case.add_input({Scale});
+    test_case.add_input({Offset});
+
+    test_case.add_expected_output(input_shape, {1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7});
+    test_case.run();
 }
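
For reference, every hunk above converges on the same utility-based flow. The sketch below is not part of the patch; the explicit template arguments (TestEngine and the element types), which this copy of the diff does not show, are assumptions inferred from the surrounding code rather than verbatim source, and the values are taken from the quantize_zero_offset hunk.

    // Hedged sketch of the refactored test body; template arguments are assumed.
    std::vector<float> x{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};

    auto test_case = test::TestCase<TestEngine>(f);   // f is the ngraph::Function built by the test
    test_case.add_input<float>({x});                  // bind x to the function's parameter X
    test_case.add_expected_output<uint8_t>(input_shape,
                                           {0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6});
    test_case.run();                                  // float outputs use run(MIN_FLOAT_TOLERANCE_BITS)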