From 5de5f4d7d1b2860433f27b05bc622ff6aac3e3a3 Mon Sep 17 00:00:00 2001
From: Patryk Elszkowski
Date: Tue, 27 Apr 2021 06:09:23 +0200
Subject: [PATCH] Constant op SLT (#5349)

* add SLT for Constant OP

* add test for U4 and I4

* drop test for BIN data

Co-authored-by: Patryk Elszkowski
---
 .../serialization/single_layer/constant.cpp   |  56 ++++++
 .../single_layer_tests/constant.cpp           |  54 ++++++
 .../include/single_layer_tests/constant.hpp   |  15 ++
 .../single_layer/constant.hpp                 |  32 ++++
 .../src/single_layer/constant.cpp             |  45 +++++
 .../functional_test_utils/precision_utils.hpp |   4 +
 .../src/utils/ngraph_helpers.cpp              | 175 +++++++++++++++++-
 7 files changed, 377 insertions(+), 4 deletions(-)
 create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/constant.cpp
 create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/constant.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp
 create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
 create mode 100644 inference-engine/tests/functional/shared_test_classes/src/single_layer/constant.cpp

diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/constant.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/constant.cpp
new file mode 100644
index 00000000000..4d4cce07bf0
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/constant.cpp
@@ -0,0 +1,56 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_layer/constant.hpp"
+
+#include <vector>
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+TEST_P(ConstantLayerTest, Serialize) {
+    Serialize();
+}
+
+std::vector<std::vector<size_t>> shapes = {
+    {2, 2, 3},
+    {3, 4, 1},
+    {1, 1, 12},
+};
+
+std::vector<InferenceEngine::Precision> precisions = {
+    InferenceEngine::Precision::BIN,  InferenceEngine::Precision::BF16,
+    InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP64, InferenceEngine::Precision::U4,
+    InferenceEngine::Precision::U8,   InferenceEngine::Precision::U16,
+    InferenceEngine::Precision::U32,  InferenceEngine::Precision::I4,
+    InferenceEngine::Precision::I8,   InferenceEngine::Precision::I16,
+    InferenceEngine::Precision::I32,
+};
+
+std::vector<std::string> data = {"1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5"};
+
+std::vector<InferenceEngine::Precision> precisionsWithNegativeValues = {
+    InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
+    InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
+    InferenceEngine::Precision::I4,   InferenceEngine::Precision::I8,
+    InferenceEngine::Precision::I16,  InferenceEngine::Precision::I32,
+};
+
+std::vector<std::string> dataWithNegativeValues = {"1", "-2", "3", "-4", "5", "-6",
+                                                   "7", "-1", "2", "-3", "4", "-5"};
+
+INSTANTIATE_TEST_CASE_P(smoke_Constant_Serialization, ConstantLayerTest,
+                        ::testing::Combine(::testing::ValuesIn(shapes),
+                                           ::testing::ValuesIn(precisions), ::testing::Values(data),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        ConstantLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_Constant_Serialization_with_negative_values, ConstantLayerTest,
+                        ::testing::Combine(::testing::ValuesIn(shapes),
+                                           ::testing::ValuesIn(precisionsWithNegativeValues),
+                                           ::testing::Values(dataWithNegativeValues),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        ConstantLayerTest::getTestCaseName);
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/constant.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/constant.cpp
new file mode 100644
index 00000000000..06ee157d391
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/constant.cpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/constant.hpp"
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+std::vector<std::vector<size_t>> shapes{
+    {2, 2, 3},
+    {3, 4, 1},
+    {1, 1, 12},
+};
+
+std::vector<InferenceEngine::Precision> precisions{
+    InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
+    InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
+    InferenceEngine::Precision::U4,   InferenceEngine::Precision::U8,
+    InferenceEngine::Precision::U16,  InferenceEngine::Precision::U32,
+    InferenceEngine::Precision::I4,   InferenceEngine::Precision::I8,
+    InferenceEngine::Precision::I16,  InferenceEngine::Precision::I32,
+};
+
+std::vector<std::string> data{"0", "1", "2", "3", "4", "5", "6", "7", "0", "1", "2", "3"};
+
+std::vector<InferenceEngine::Precision> precisionsWithNegativeValues{
+    InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
+    InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
+    InferenceEngine::Precision::I4,   InferenceEngine::Precision::I8,
+    InferenceEngine::Precision::I16,  InferenceEngine::Precision::I32,
+};
+
+std::vector<std::string> dataWithNegativeValues{"1", "-2", "3", "-4", "5", "-6",
+                                                "7", "-1", "2", "-3", "4", "-5"};
+
+INSTANTIATE_TEST_CASE_P(smoke_Constant, ConstantLayerTest,
+                        ::testing::Combine(::testing::ValuesIn(shapes),
+                                           ::testing::ValuesIn(precisions), ::testing::Values(data),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        ConstantLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_Constant_with_negative_values, ConstantLayerTest,
+                        ::testing::Combine(::testing::ValuesIn(shapes),
+                                           ::testing::ValuesIn(precisionsWithNegativeValues),
+                                           ::testing::Values(dataWithNegativeValues),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        ConstantLayerTest::getTestCaseName);
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp
new file mode 100644
index 00000000000..5c806dc8706
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_layer/constant.hpp"
+
+namespace LayerTestsDefinitions {
+
+TEST_P(ConstantLayerTest, CompareWithRefs) {
+    Run();
+};
+
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
new file mode 100644
index 00000000000..8b821f9bbb2
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" +#include "shared_test_classes/base/layer_test_utils.hpp" + +namespace LayerTestsDefinitions { + +using constantParamsTuple = typename std::tuple< + std::vector, // Constant data shape + InferenceEngine::Precision, // Constant data precision + std::vector, // Constant elements + std::string>; // Device name + +class ConstantLayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/constant.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/constant.cpp new file mode 100644 index 00000000000..abd37314759 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/constant.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/constant.hpp" + +namespace LayerTestsDefinitions { +namespace { +template +std::vector getElements(const std::vector& v) { + const auto new_size = std::min(N, v.size()); + return {begin(v), std::next(begin(v), new_size)}; +} +} // namespace + +std::string ConstantLayerTest::getTestCaseName( + const testing::TestParamInfo& obj) { + std::vector data_shape; + InferenceEngine::Precision data_precision; + std::vector data_elements; + std::string targetName; + + std::tie(data_shape, data_precision, data_elements, targetName) = obj.param; + + std::ostringstream result; + result << "S=" << CommonTestUtils::vec2str(data_shape) << "_"; + result << "dataPRC=" << data_precision.name() << "_"; + result << "dataValue=" << CommonTestUtils::vec2str(getElements<5>(data_elements)) << "_"; + return result.str(); +} + +void ConstantLayerTest::SetUp() { + std::vector data_shape; + InferenceEngine::Precision data_precision; + std::vector data_elements; + + std::tie(data_shape, data_precision, data_elements, targetDevice) = this->GetParam(); + + const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_precision); + auto constant = ngraph::op::Constant::create(precision, data_shape, data_elements); + ngraph::ResultVector results{std::make_shared(constant)}; + + function = std::make_shared(results, ngraph::ParameterVector{}, "constant"); +} +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp index 79088b00f66..ff7ba66412f 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp @@ -27,6 +27,10 @@ inline ::ngraph::element::Type convertIE2nGraphPrc(const InferenceEngine::Precis return ::ngraph::element::Type(::ngraph::element::Type_t::f16); case InferenceEngine::Precision::BF16: return ::ngraph::element::Type(::ngraph::element::Type_t::bf16); + case InferenceEngine::Precision::U4: + return ::ngraph::element::Type(::ngraph::element::Type_t::u4); + case InferenceEngine::Precision::I4: + return ::ngraph::element::Type(::ngraph::element::Type_t::i4); case 
     case InferenceEngine::Precision::U8:
         return ::ngraph::element::Type(::ngraph::element::Type_t::u8);
     case InferenceEngine::Precision::I8:
diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
index 93dca7990f8..6c5b07f9e3b 100644
--- a/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
+++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/src/utils/ngraph_helpers.cpp
@@ -136,7 +136,7 @@ std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr
     for (size_t resultIndex = 0; resultIndex < results.size(); resultIndex++) {
         auto& output = outputs[resultIndex];
         const auto& outputTensor = outputTensors[resultIndex];
-        output.resize(shape_size(outputTensor->get_shape()) * outputTensor->get_element_type().size());
+        output.resize(ceil(shape_size(outputTensor->get_shape()) * outputTensor->get_element_type().bitwidth() / 8.f));
         outputTensors[resultIndex]->read(output.data(), output.size());
         if (!convertType.empty() && convertType[resultIndex] != element::Type_t::undefined &&
             outputTensor->get_element_type() != element::Type(convertType[resultIndex]))
@@ -300,19 +300,168 @@ bool is_tensor_iterator_exist(const std::shared_ptr<ngraph::Function> & func) {
 }
 
 namespace {
+template <int Bitwidth, typename Value, typename In,
+          typename std::enable_if<std::is_unsigned<Value>::value, bool>::type = true>
+Value fix_sign(In v) {
+    return v;
+}
+template <int Bitwidth, typename Value, typename In,
+          typename std::enable_if<std::is_signed<Value>::value, bool>::type = true>
+Value fix_sign(In v) {
+    constexpr unsigned sign_bit = 1u << (Bitwidth - 1);
+    const bool is_negative_number = v & sign_bit;
+    return is_negative_number ? v | 0xFFF0 : v;
+}
+
+template <int Bitwidth, typename Value>
+class LowPrecisionWrapper {
+public:
+    static constexpr int bitwidth = Bitwidth;
+    static constexpr uint8_t value_mask = (1u << bitwidth) - 1u;
+    static constexpr int elements_in_byte = 8 / bitwidth;
+
+    LowPrecisionWrapper(uint8_t* data, int position): data(data), position(position) {}
+
+    operator Value() const {
+        return fix_sign<Bitwidth, Value>(((*data) >> (position * bitwidth)) & value_mask);
+    }
+
+    LowPrecisionWrapper& operator=(Value v) {
+        uint8_t masked_value = v & value_mask;
+        *data &= ~(value_mask << (position * bitwidth));
+        *data |= masked_value << (position * bitwidth);
+        return *this;
+    }
+
+private:
+    int position{elements_in_byte - 1};
+    uint8_t* data;
+};
+
+template <int Bitwidth, typename Value>
+class LowPrecisionWrapperToConst {
+public:
+    static constexpr int bitwidth = Bitwidth;
+    static constexpr uint8_t value_mask = (1u << bitwidth) - 1u;
+    static constexpr int elements_in_byte = 8 / bitwidth;
+
+    LowPrecisionWrapperToConst(const uint8_t* data, int position): data(data), position(position) {}
+
+    operator Value() const {
+        return fix_sign<Bitwidth, Value>(((*data) >> (position * bitwidth)) & value_mask);
+    }
+
+private:
+    int position{elements_in_byte - 1};
+    const uint8_t* data;
+};
+
+template <int Bitwidth, typename Value>
+class LowPrecistionRange {
+public:
+    static constexpr int bitwidth = Bitwidth;
+    static constexpr int elements_in_byte = 8 / bitwidth;
+
+    LowPrecistionRange(uint8_t* data): data(data) {}
+
+    LowPrecisionWrapper<Bitwidth, Value> operator[](size_t index) const {
+        const ptrdiff_t byte_offset = index / elements_in_byte;
+        const int bit_position = elements_in_byte - 1 - (index % elements_in_byte);
+        return {data + byte_offset, bit_position};
+    }
+
+    uint8_t* data;
+};
+
+template <int Bitwidth, typename Value>
+class LowPrecistionConstRange {
+public:
+    static constexpr int bitwidth = Bitwidth;
+    static constexpr int elements_in_byte = 8 / bitwidth;
+
+    LowPrecistionConstRange(const uint8_t* data) : data(data) {}
+
+    LowPrecisionWrapperToConst<Bitwidth, Value> operator[](size_t index) const {
+        const ptrdiff_t byte_offset = index / elements_in_byte;
+        const int bit_position = elements_in_byte - 1 - (index % elements_in_byte);
+        return {data + byte_offset, bit_position};
+    }
+
+    const uint8_t* data;
+};
+
+template <element::Type_t Type,
+          typename std::enable_if<Type != element::Type_t::u1 && Type != element::Type_t::u4 &&
+                                  Type != element::Type_t::i4, bool>::type = true>
+const fundamental_type_for<Type>* cast_to(const uint8_t* data) {
+    return reinterpret_cast<const fundamental_type_for<Type>*>(data);
+}
+
+template <element::Type_t Type,
+          typename std::enable_if<Type != element::Type_t::u1 && Type != element::Type_t::u4 &&
+                                  Type != element::Type_t::i4, bool>::type = true>
+fundamental_type_for<Type>* cast_to(uint8_t* data) {
+    return reinterpret_cast<fundamental_type_for<Type>*>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::u1, bool>::type = true>
+LowPrecistionConstRange<1, uint8_t> cast_to(const uint8_t* data) {
+    return LowPrecistionConstRange<1, uint8_t>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::u1, bool>::type = true>
+LowPrecistionRange<1, uint8_t> cast_to(uint8_t* data) {
+    return LowPrecistionRange<1, uint8_t>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::u4, bool>::type = true>
+LowPrecistionConstRange<4, uint8_t> cast_to(const uint8_t* data) {
+    return LowPrecistionConstRange<4, uint8_t>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::u4, bool>::type = true>
+LowPrecistionRange<4, uint8_t> cast_to(uint8_t* data) {
+    return LowPrecistionRange<4, uint8_t>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::i4, bool>::type = true>
+LowPrecistionConstRange<4, int8_t> cast_to(const uint8_t* data) {
+    return LowPrecistionConstRange<4, int8_t>(data);
+}
+
+template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::i4, bool>::type = true>
+LowPrecistionRange<4, int8_t> cast_to(uint8_t* data) {
+    return LowPrecistionRange<4, int8_t>(data);
+}
 
 template <element::Type_t FromType, element::Type_t ToType>
 std::vector<std::uint8_t> convertPrecision(const std::vector<std::uint8_t> &buffer, const size_t elementsCount) {
     using fromPrec = fundamental_type_for<FromType>;
     using toPrec = fundamental_type_for<ToType>;
-    NGRAPH_CHECK(buffer.size() >= elementsCount * sizeof(fromPrec), "avoid buffer overflow");
+    const size_t min_buffer_size = [&] {
+        element::Type from_type(FromType);
+        if (from_type.bitwidth() >= 8) {
+            return elementsCount * sizeof(fromPrec);
+        }
+        return from_type.bitwidth() * elementsCount / 8;
+    }();
+
+    NGRAPH_CHECK(buffer.size() >= min_buffer_size, "avoid buffer overflow");
 
     constexpr auto elementSize = sizeof(toPrec);
     std::vector<std::uint8_t> convertedData(elementsCount * elementSize);
-    const fromPrec *src = reinterpret_cast<const fromPrec *>(buffer.data());
-    toPrec *dst = reinterpret_cast<toPrec *>(convertedData.data());
+    auto src = cast_to<FromType>(buffer.data());
+    auto dst = cast_to<ToType>(convertedData.data());
     for (size_t i = 0; i < elementsCount; i++) {
         dst[i] = static_cast<toPrec>(src[i]);
     }
@@ -337,6 +486,9 @@ std::vector<std::uint8_t> convertPrecisionFrom(const std::vector<std::uint8_t> &
     case element::Type_t::f64: {
        return convertPrecision<FromType, element::Type_t::f64>(output, elementsCount);
    }
+    case element::Type_t::i4: {
+        return convertPrecision<FromType, element::Type_t::i4>(output, elementsCount);
+    }
     case element::Type_t::i8: {
         return convertPrecision<FromType, element::Type_t::i8>(output, elementsCount);
     }
@@ -349,6 +501,12 @@ std::vector<std::uint8_t> convertPrecisionFrom(const std::vector<std::uint8_t> &
     case element::Type_t::i64: {
         return convertPrecision<FromType, element::Type_t::i64>(output, elementsCount);
     }
+    case element::Type_t::u1: {
+        return convertPrecision<FromType, element::Type_t::u1>(output, elementsCount);
+    }
+    case element::Type_t::u4: {
+        return convertPrecision<FromType, element::Type_t::u4>(output, elementsCount);
+    }
     case element::Type_t::u8: {
         return convertPrecision<FromType, element::Type_t::u8>(output, elementsCount);
     }
@@ -389,6 +547,9 @@ std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>
     case element::Type_t::f64: {
         return convertPrecisionFrom<element::Type_t::f64>(output, toPrecision, elementsCount);
     }
+    case element::Type_t::i4: {
+        return convertPrecisionFrom<element::Type_t::i4>(output, toPrecision, elementsCount);
+    }
     case element::Type_t::i8: {
         return convertPrecisionFrom<element::Type_t::i8>(output, toPrecision, elementsCount);
     }
@@ -401,6 +562,12 @@ std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>
     case element::Type_t::i64: {
         return convertPrecisionFrom<element::Type_t::i64>(output, toPrecision, elementsCount);
     }
+    case element::Type_t::u1: {
+        return convertPrecisionFrom<element::Type_t::u1>(output, toPrecision, elementsCount);
+    }
+    case element::Type_t::u4: {
+        return convertPrecisionFrom<element::Type_t::u4>(output, toPrecision, elementsCount);
+    }
     case element::Type_t::u8: {
         return convertPrecisionFrom<element::Type_t::u8>(output, toPrecision, elementsCount);
     }
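
Note on the low-precision helpers (editor's illustration, not part of the patch): the wrappers added to ngraph_helpers.cpp store 4-bit elements two per byte, high nibble first, and sign-extend i4 values when they are read back through fix_sign. The standalone sketch below walks through that read path with hypothetical names (read_i4, packed); it is an assumption-level restatement of the idea, not code from the OpenVINO tree.

    #include <cstdint>
    #include <cstddef>
    #include <iostream>

    // Read the element at `index` from a buffer of packed 4-bit values
    // (two elements per byte, the first element in the high nibble),
    // then sign-extend it into int8_t, mirroring what fix_sign does for i4.
    int8_t read_i4(const uint8_t* data, size_t index) {
        const uint8_t byte = data[index / 2];
        const int shift = (index % 2 == 0) ? 4 : 0;           // high nibble holds the first element
        const uint8_t nibble = (byte >> shift) & 0x0F;        // 4-bit payload
        const bool negative = nibble & 0x08;                  // sign bit of a 4-bit value
        return negative ? static_cast<int8_t>(nibble | 0xF0)  // extend the sign into the upper bits
                        : static_cast<int8_t>(nibble);
    }

    int main() {
        // 0x7E packs +7 (0b0111) and -2 (0b1110); 0x9F packs -7 (0b1001) and -1 (0b1111).
        const uint8_t packed[] = {0x7E, 0x9F};
        for (size_t i = 0; i < 4; ++i)
            std::cout << static_cast<int>(read_i4(packed, i)) << ' ';  // prints: 7 -2 -7 -1
        std::cout << '\n';
    }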
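
A second small check (again an editor's sketch, not part of the patch): the interpreterFunction change above sizes output buffers as ceil(element_count * bitwidth / 8) bytes instead of one full byte per element, and the min_buffer_size lambda in convertPrecision applies the same arithmetic to its input check. The helper name packed_byte_size below is made up for the example.

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    // Bytes needed for a tensor whose elements are packed at `bitwidth` bits each.
    size_t packed_byte_size(size_t element_count, size_t bitwidth) {
        return static_cast<size_t>(std::ceil(element_count * bitwidth / 8.0f));
    }

    int main() {
        // A {1, 1, 12} constant from the test shapes:
        std::cout << packed_byte_size(12, 4) << '\n';  // i4: 48 bits -> 6 bytes (the old size() path reserved 12)
        std::cout << packed_byte_size(12, 1) << '\n';  // u1: 12 bits -> 2 bytes
        std::cout << packed_byte_size(12, 8) << '\n';  // i8: unchanged, 12 bytes
    }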