Add unit tests for Convert operation (#5558)

* Add Serialization SLT for Convert op.

* Add comment with explanation to convert ref impl.

* Refactored backend tests for Convert operation.

* Give better names to backend tests.

* Add more backend unit tests.

* Fixed tests related to u1/u4/i4 types.
Jozef Daniecki 2021-05-12 13:35:12 +02:00 committed by GitHub
parent abd663463d
commit 2ba5c344be
5 changed files with 454 additions and 106 deletions


@@ -0,0 +1,37 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/convert.hpp"
#include <vector>
using namespace LayerTestsDefinitions;
namespace {
const std::vector<std::vector<size_t>> inShape = {{1, 2, 3, 4}};
const std::vector<InferenceEngine::Precision> precisions = {
InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8,
InferenceEngine::Precision::I8, InferenceEngine::Precision::U16,
InferenceEngine::Precision::I16, InferenceEngine::Precision::U32,
InferenceEngine::Precision::I32, InferenceEngine::Precision::U64,
InferenceEngine::Precision::I64, InferenceEngine::Precision::BF16,
InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP64};
TEST_P(ConvertLayerTest, Serialize) {
Serialize();
}
INSTANTIATE_TEST_CASE_P(
smoke_Serialization_ConvertLayerTest, ConvertLayerTest,
::testing::Combine(::testing::Values(inShape),
::testing::ValuesIn(precisions),
::testing::ValuesIn(precisions),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(InferenceEngine::Layout::ANY),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConvertLayerTest::getTestCaseName);
} // namespace
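Note: the single INSTANTIATE_TEST_CASE_P above expands ::testing::Combine of the two precision lists into their full cross product, so one Serialize test instance is generated for every (input precision, target precision) pair (13 x 13 here). A minimal, self-contained sketch of that gtest mechanism, using a hypothetical PrecisionPairTest fixture that is not part of this commit:

#include <gtest/gtest.h>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical fixture: each parameter is a (source, destination) precision name pair.
class PrecisionPairTest : public ::testing::TestWithParam<std::tuple<std::string, std::string>> {
};

TEST_P(PrecisionPairTest, PairIsNotEmpty) {
    const std::string src = std::get<0>(GetParam());
    const std::string dst = std::get<1>(GetParam());
    EXPECT_FALSE(src.empty());
    EXPECT_FALSE(dst.empty());
}

static const std::vector<std::string> precisions = {"U8", "I32", "FP32"};

// Combine() yields the cross product: 3 x 3 = 9 test instances.
INSTANTIATE_TEST_CASE_P(CrossProduct, PrecisionPairTest,
                        ::testing::Combine(::testing::ValuesIn(precisions),
                                           ::testing::ValuesIn(precisions)));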


@@ -29,6 +29,7 @@ namespace ngraph
template <>
void convert<float16, float>(const float16* arg, float* out, size_t count);
// overload to handle ngraph::boolean (it is stored as char)
template <typename TI, typename TO>
typename std::enable_if<std::is_same<TO, char>::value>::type
convert(const TI* arg, TO* out, size_t count)
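The hunk above only shows the declaration of the new overload; the added comment explains why it exists: ngraph::boolean is stored as char, and converting to it should produce 0/1 rather than a truncating cast, which is what the new *_to_boolean backend tests below expect. A minimal sketch of the enable_if dispatch pattern under that assumption, not copied from the actual reference implementation:

#include <cstddef>
#include <type_traits>

// Generic reference convert: element-wise static_cast.
template <typename TI, typename TO>
typename std::enable_if<!std::is_same<TO, char>::value>::type
convert_sketch(const TI* arg, TO* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; ++i)
        out[i] = static_cast<TO>(arg[i]);
}

// Overload selected when TO is char (i.e. ngraph::boolean):
// any non-zero input maps to 1, zero stays 0.
template <typename TI, typename TO>
typename std::enable_if<std::is_same<TO, char>::value>::type
convert_sketch(const TI* arg, TO* out, std::size_t count)
{
    for (std::size_t i = 0; i < count; ++i)
        out[i] = static_cast<char>(arg[i] != TI(0));
}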


@@ -26,6 +26,23 @@ op::Convert::Convert(const Output<Node>& arg, const element::Type& destination_t
void op::Convert::validate_and_infer_types()
{
NGRAPH_OP_SCOPE(v0_Convert_validate_and_infer_types);
const element::Type data_et = get_input_element_type(0);
const element::Type destination_et = m_destination_type;
NODE_VALIDATION_CHECK(this,
data_et != element::u1 && data_et != element::u4 &&
data_et != element::i4,
"Input element type '",
data_et,
"' is not supported.");
NODE_VALIDATION_CHECK(this,
destination_et != element::u1 && destination_et != element::u4 &&
destination_et != element::i4,
"Destination element type '",
destination_et,
"' is not supported.");
set_output_type(0, m_destination_type, get_input_partial_shape(0));
}
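In effect, building a Convert whose input or destination element type is u1, u4, or i4 now fails at graph construction time with NodeValidationFailure, which is what the new *_is_not_supported_yet backend tests assert. A hedged usage sketch, assuming the ngraph umbrella header and that validation runs from the op constructor, as the tests suggest:

#include <ngraph/ngraph.hpp>
#include <iostream>
#include <memory>

int main()
{
    using namespace ngraph;
    const auto param = std::make_shared<op::Parameter>(element::u1, Shape{2, 2});
    try
    {
        // Expected to throw: a u1 input element type is rejected by validate_and_infer_types().
        const auto convert = std::make_shared<op::Convert>(param, element::f32);
    }
    catch (const NodeValidationFailure& e)
    {
        std::cout << "Convert rejected: " << e.what() << std::endl;
    }
    return 0;
}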


@@ -21,138 +21,421 @@ using namespace ngraph;
static string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f = make_shared<Function>(make_shared<op::Convert>(A, element::f32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int32_t>{281, 2, 3, 4});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f((vector<float>{281, 2, 3, 4}), read_vector<float>(result)));
}
namespace
{
template <typename T_IN, typename T_OUT>
void ConvertTest(const std::vector<T_IN>& input,
const Shape& input_shape,
const ngraph::element::Type& input_type,
const std::vector<T_OUT>& expected_output,
const ngraph::element::Type& expected_output_type)
{
const auto in = make_shared<op::Parameter>(input_type, input_shape);
const auto convert = make_shared<op::Convert>(in, expected_output_type);
const auto f = make_shared<Function>(NodeVector{convert}, ParameterVector{in});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input(input);
test_case.add_expected_output(expected_output);
test_case.run();
}
} // namespace
// destination: boolean
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_boolean)
{
const uint8_t lowest = std::numeric_limits<uint8_t>::lowest();
const uint8_t max = std::numeric_limits<uint8_t>::max();
const std::vector<uint8_t> input{0, 12, 23, 0, lowest, max};
const Shape input_shape{2, 3};
const element::Type input_type = ngraph::element::u8;
const std::vector<char> expected_output{0, 1, 1, 0, 0, 1};
const element::Type expected_output_type = ngraph::element::boolean;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::u16, shape);
auto f = make_shared<Function>(make_shared<op::Convert>(A, element::f32), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::u16, shape);
copy_data(a, vector<uint16_t>{1, 2, 3, 4});
auto result = backend->create_tensor(element::f32, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_TRUE(test::all_close_f(
(vector<float>{1, 2, 3, 4}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_boolean)
{
const int32_t lowest = std::numeric_limits<int32_t>::lowest();
const int32_t max = std::numeric_limits<int32_t>::max();
const std::vector<int32_t> input{0, -12, 23, 0, lowest, max};
const Shape input_shape{2, 3};
const element::Type input_type = ngraph::element::i32;
const std::vector<char> expected_output{0, 1, 1, 0, 1, 1};
const element::Type expected_output_type = ngraph::element::boolean;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
{
Shape shape{2, 3};
auto A = make_shared<op::Parameter>(element::i32, shape);
auto f =
make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
int32_t lowest = std::numeric_limits<int32_t>::lowest();
int32_t max = std::numeric_limits<int32_t>::max();
// Create some tensors for input/output
auto a = backend->create_tensor(element::i32, shape);
copy_data(a, vector<int32_t>{0, 12, 23, 0, lowest, max});
auto result = backend->create_tensor(element::boolean, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1}), read_vector<char>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_boolean)
{
const float lowest = std::numeric_limits<float>::lowest();
const float max = std::numeric_limits<float>::max();
const float min = std::numeric_limits<float>::min();
const float pos_inf = std::numeric_limits<float>::infinity();
const float neg_inf = -std::numeric_limits<float>::infinity();
const std::vector<float> input{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf};
const Shape input_shape{3, 3};
const element::Type input_type = ngraph::element::f32;
const std::vector<char> expected_output{0, 1, 1, 0, 1, 1, 1, 1, 1};
const element::Type expected_output_type = ngraph::element::boolean;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool)
{
Shape shape{3, 3};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto f =
make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
float lowest = std::numeric_limits<float>::lowest();
float max = std::numeric_limits<float>::max();
float min = std::numeric_limits<float>::min();
float pos_inf = std::numeric_limits<float>::infinity();
float neg_inf = -std::numeric_limits<float>::infinity();
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf});
auto result = backend->create_tensor(element::boolean, shape);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a});
EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1, 1, 1, 1}), read_vector<char>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bf16)
{
const vector<float> a_data = {
0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
const auto A = make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 5});
const auto convert = make_shared<op::Convert>(A, element::bf16);
const auto f = make_shared<Function>(NodeVector{convert}, ParameterVector{A});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>(a_data);
test_case.add_expected_output<bfloat16>(
std::vector<bfloat16>(std::begin(a_data), std::end(a_data)));
test_case.run();
}
// destination: bf16
NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_bf16)
{
const std::vector<float> input{
0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};
const Shape input_shape{1, 1, 3, 5};
const element::Type input_type = ngraph::element::f32;
const std::vector<bfloat16> expected_output(std::begin(input), std::end(input));
const element::Type expected_output_type = ngraph::element::bf16;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: f16
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f16)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<float16> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const element::Type expected_output_type = ngraph::element::f16;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: f32
NGRAPH_TEST(${BACKEND_NAME}, convert_i4_to_f32_is_not_supported_yet)
{
const std::vector<int8_t> input{0x00, 0x00};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::i4;
const std::vector<float> expected_output{0.0f, 0.0f, 0.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_i8_to_f32)
{
const std::vector<int8_t> input{-127, -0, 0, 127};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::i8;
const std::vector<float> expected_output{-127.0f, -0.0f, 0.0f, 127.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_i16_to_f32)
{
const std::vector<int16_t> input{-32000, -0, 0, 32000};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::i16;
const std::vector<float> expected_output{-32000.0f, -0.0f, 0.0f, 32000.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_i32_to_f32)
{
const std::vector<int32_t> input{-64000, -0, 0, 64000};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::i32;
const std::vector<float> expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_i64_to_f32)
{
const std::vector<int64_t> input{-64000, -0, 0, 64000};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::i64;
const std::vector<float> expected_output{-64000.0f, -0.0f, 0.0f, 64000.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u1_to_f32_is_not_supported_yet)
{
const std::vector<uint8_t> input{0x00};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u1;
const std::vector<float> expected_output{0.0f, 0.0f, 0.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u4_to_f32_is_not_supported_yet)
{
const std::vector<uint8_t> input{0x00, 0x00};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u4;
const std::vector<float> expected_output{0.0f, 0.0f, 0.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_f32)
{
const std::vector<uint8_t> input{255, 128, 32, 0};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u8;
const std::vector<float> expected_output{255.0f, 128.0f, 32.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u16_to_f32)
{
const std::vector<uint16_t> input{64000, 32000, 128, 0};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u16;
const std::vector<float> expected_output{64000.0f, 32000.0f, 128.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u32_to_f32)
{
const std::vector<uint32_t> input{4000000, 2000000, 128, 0};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u32;
const std::vector<float> expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_u64_to_f32)
{
const std::vector<uint64_t> input{4000000, 2000000, 128, 0};
const Shape input_shape{2, 2};
const element::Type input_type = ngraph::element::u64;
const std::vector<float> expected_output{4000000.0f, 2000000.0f, 128.0f, 0.0f};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_float32)
{
const vector<bfloat16> a_data = {
0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5};
const auto A = make_shared<op::Parameter>(element::bf16, Shape{1, 1, 3, 5});
const auto convert = make_shared<op::Convert>(A, element::f32);
const auto f = make_shared<Function>(NodeVector{convert}, ParameterVector{A});
auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<bfloat16>(a_data);
test_case.add_expected_output<float>(std::vector<float>(std::begin(a_data), std::end(a_data)));
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_to_f32)
{
const std::vector<bfloat16> input{
0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5};
const Shape input_shape{1, 1, 3, 5};
const element::Type input_type = ngraph::element::bf16;
const std::vector<float> expected_output(std::begin(input), std::end(input));
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_fp16_float32)
{
std::vector<float> f32vec = {-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5};
std::vector<float16> f16vec(std::begin(f32vec), std::end(f32vec));
std::vector<float> result(f32vec.size());
runtime::reference::convert(f16vec.data(), result.data(), f32vec.size());
EXPECT_EQ(result, f32vec);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_f16_to_f32)
{
const std::vector<float16> input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5};
const Shape input_shape{3, 3};
const element::Type input_type = ngraph::element::f16;
const std::vector<float> expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_uint8_fp16)
{
std::vector<uint8_t> u8vec = {0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
std::vector<float16> f16vec(std::begin(u8vec), std::end(u8vec));
std::vector<float16> result(u8vec.size());
runtime::reference::convert(u8vec.data(), result.data(), u8vec.size());
EXPECT_EQ(result, f16vec);
}
NGRAPH_TEST(${BACKEND_NAME}, convert_f32_to_f32)
{
const std::vector<float> input{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5};
const Shape input_shape{3, 3};
const element::Type input_type = ngraph::element::f32;
const std::vector<float> expected_output{-20.5, -15, -10.5, -0.5, 0, 0.5, 10.5, 15, 20.5};
const element::Type expected_output_type = ngraph::element::f32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: f64
// not supported by IE, hence no tests
// destination: i4
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i4_is_not_supported_yet)
{
const std::vector<uint8_t> input{0, 0, 0, 0};
const Shape input_shape{4};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint8_t> expected_output{0x00, 0x00};
const element::Type expected_output_type = ngraph::element::i4;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
// destination: i8
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i8)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 128};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<int8_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const element::Type expected_output_type = ngraph::element::i8;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: i16
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i16)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<int16_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const element::Type expected_output_type = ngraph::element::i16;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: i32
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i32)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<int32_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const element::Type expected_output_type = ngraph::element::i32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: i64
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_i64)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<int64_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 130, 142};
const element::Type expected_output_type = ngraph::element::i64;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: u1
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u1_is_not_supported_yet)
{
const std::vector<uint8_t> input{0, 0, 0, 0};
const Shape input_shape{4};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint8_t> expected_output{0x00};
const element::Type expected_output_type = ngraph::element::u1;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
// destination: u4
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u4_is_not_supported_yet)
{
const std::vector<uint8_t> input{0, 0, 0, 0};
const Shape input_shape{4};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint8_t> expected_output{0x00, 0x00};
const element::Type expected_output_type = ngraph::element::u4;
ASSERT_THROW(ConvertTest(input, input_shape, input_type, expected_output, expected_output_type),
ngraph::NodeValidationFailure);
}
// destination: u8
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u8)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint8_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const element::Type expected_output_type = ngraph::element::u8;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: u16
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u16)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint16_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const element::Type expected_output_type = ngraph::element::u16;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: u32
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u32)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint32_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const element::Type expected_output_type = ngraph::element::u32;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}
// destination: u64
NGRAPH_TEST(${BACKEND_NAME}, convert_u8_to_u64)
{
const std::vector<uint8_t> input{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const Shape input_shape{11};
const element::Type input_type = ngraph::element::u8;
const std::vector<uint64_t> expected_output{0, 10, 15, 20, 43, 56, 78, 99, 102, 110, 127};
const element::Type expected_output_type = ngraph::element::u64;
ConvertTest(input, input_shape, input_type, expected_output, expected_output_type);
}


@@ -53,7 +53,6 @@ onnx_model_addmul_abc
IE_CPU.interpolate_down_scales_const_linear
# data [<name>] doesn't exist
convert_float32_bool
broadcast_trivial
aliased_output
bool_init_raw
@@ -824,9 +823,6 @@ cum_sum_2dim
cum_sum_3d
cum_sum_2dim_allmodes
# Cannot create MKLDNNMemoryDesc from TensorDesc. Unsupported precision!
convert_uint16_float32
# Unsupported primitive of type: Ceiling
ceiling
@@ -886,7 +882,6 @@ strided_slice_stride_optional
divide_int32
divide_cpp_rounding_int32
divide_python_rounding_int32
convert_int32_bool
lesseq_int32
# Constant and Low Precision
@@ -1056,6 +1051,22 @@ roll_3d_input
roll_3d_input_negative_shift
roll_negative_axes
# convert operation
IE_CPU.convert_f16_to_f32
IE_CPU.convert_u8_to_f16
IE_CPU.convert_u8_to_i16
IE_CPU.convert_u8_to_i64
IE_CPU.convert_u8_to_u16
IE_CPU.convert_u8_to_u32
IE_CPU.convert_u8_to_u64
IE_CPU.convert_u8_to_boolean
IE_CPU.convert_i32_to_boolean
IE_CPU.convert_f32_to_boolean
IE_CPU.convert_u32_to_f32 # NOT_IMPLEMENTED
IE_CPU.convert_i4_to_f32 # NOT_IMPLEMENTED
IE_CPU.convert_u1_to_f32 # NOT_IMPLEMENTED
IE_CPU.convert_u4_to_f32 # NOT_IMPLEMENTED
#-------------------------------------------------------------------------------
#
# Inference Engine CPU plugin excludes
@@ -1418,7 +1429,6 @@ IE_GPU.divide_overload
IE_GPU.divide_by_zero_float32
IE_GPU.cosh
IE_GPU.cos
IE_GPU.convert_int32_float32
IE_GPU.concat_negative_axis
IE_GPU.concat_matrix_colwise
IE_GPU.concat_matrix_rowwise