Constant op SLT (#5349)

* add SLT (single-layer tests) for Constant op

* add tests for U4 and I4 precisions

* drop test for BIN data

Co-authored-by: Patryk Elszkowski <patryk.elszkowki@intel.com>
Patryk Elszkowski 2021-04-27 06:09:23 +02:00 committed by GitHub
parent 6581127114
commit 5de5f4d7d1
7 changed files with 377 additions and 4 deletions

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/constant.hpp"
#include <vector>
using namespace LayerTestsDefinitions;
namespace {
TEST_P(ConstantLayerTest, Serialize) {
Serialize();
}
std::vector<std::vector<size_t>> shapes = {
{2, 2, 3},
{3, 4, 1},
{1, 1, 12},
};
std::vector<InferenceEngine::Precision> precisions = {
InferenceEngine::Precision::BIN, InferenceEngine::Precision::BF16,
InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP64, InferenceEngine::Precision::U4,
InferenceEngine::Precision::U8, InferenceEngine::Precision::U16,
InferenceEngine::Precision::U32, InferenceEngine::Precision::I4,
InferenceEngine::Precision::I8, InferenceEngine::Precision::I16,
InferenceEngine::Precision::I32,
};
std::vector<std::string> data = {"1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5"};
std::vector<InferenceEngine::Precision> precisionsWithNegativeValues = {
InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
InferenceEngine::Precision::I4, InferenceEngine::Precision::I8,
InferenceEngine::Precision::I16, InferenceEngine::Precision::I32,
};
std::vector<std::string> dataWithNegativeValues = {"1", "-2", "3", "-4", "5", "-6",
"7", "-1", "2", "-3", "4", "-5"};
INSTANTIATE_TEST_CASE_P(smoke_Constant_Serialization, ConstantLayerTest,
::testing::Combine(::testing::ValuesIn(shapes),
::testing::ValuesIn(precisions), ::testing::Values(data),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConstantLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Constant_Serialization_with_negative_values, ConstantLayerTest,
::testing::Combine(::testing::ValuesIn(shapes),
::testing::ValuesIn(precisionsWithNegativeValues),
::testing::Values(dataWithNegativeValues),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConstantLayerTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,54 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "single_layer_tests/constant.hpp"
#include <vector>
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
namespace {
std::vector<std::vector<size_t>> shapes{
{2, 2, 3},
{3, 4, 1},
{1, 1, 12},
};
std::vector<InferenceEngine::Precision> precisions{
InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
InferenceEngine::Precision::U4, InferenceEngine::Precision::U8,
InferenceEngine::Precision::U16, InferenceEngine::Precision::U32,
InferenceEngine::Precision::I4, InferenceEngine::Precision::I8,
InferenceEngine::Precision::I16, InferenceEngine::Precision::I32,
};
std::vector<std::string> data{"0", "1", "2", "3", "4", "5", "6", "7", "0", "1", "2", "3"};
std::vector<InferenceEngine::Precision> precisionsWithNegativeValues{
InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP64,
InferenceEngine::Precision::I4, InferenceEngine::Precision::I8,
InferenceEngine::Precision::I16, InferenceEngine::Precision::I32,
};
std::vector<std::string> dataWithNegativeValues{"1", "-2", "3", "-4", "5", "-6",
"7", "-1", "2", "-3", "4", "-5"};
INSTANTIATE_TEST_CASE_P(smoke_Constant, ConstantLayerTest,
::testing::Combine(::testing::ValuesIn(shapes),
::testing::ValuesIn(precisions), ::testing::Values(data),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConstantLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Constant_with_negative_values, ConstantLayerTest,
::testing::Combine(::testing::ValuesIn(shapes),
::testing::ValuesIn(precisionsWithNegativeValues),
::testing::Values(dataWithNegativeValues),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
ConstantLayerTest::getTestCaseName);
} // namespace
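
For reference (not part of the committed files): every shape in `shapes` holds exactly 12 elements, which is why `data` and `dataWithNegativeValues` each carry 12 string values. A minimal sketch of that invariant, assuming only the ngraph Shape header is available:

// Illustrative sketch only: each tested shape must provide room for exactly as
// many elements as the data vectors supply, otherwise Constant creation fails.
#include <cassert>
#include <cstddef>
#include <vector>
#include "ngraph/shape.hpp"  // ngraph::Shape, ngraph::shape_size

int main() {
    const std::vector<ngraph::Shape> tested_shapes{{2, 2, 3}, {3, 4, 1}, {1, 1, 12}};
    const std::size_t data_size = 12;  // size of `data` and `dataWithNegativeValues`
    for (const auto& shape : tested_shapes) {
        assert(ngraph::shape_size(shape) == data_size);  // 2*2*3 = 3*4*1 = 1*1*12 = 12
    }
    return 0;
}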

View File

@@ -0,0 +1,15 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/single_layer/constant.hpp"
namespace LayerTestsDefinitions {
TEST_P(ConstantLayerTest, CompareWithRefs) {
Run();
};
} // namespace LayerTestsDefinitions

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <tuple>
#include <vector>
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
namespace LayerTestsDefinitions {
using constantParamsTuple = typename std::tuple<
std::vector<size_t>, // Constant data shape
InferenceEngine::Precision, // Constant data precision
std::vector<std::string>, // Constant elements
std::string>; // Device name
class ConstantLayerTest : public testing::WithParamInterface<constantParamsTuple>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<constantParamsTuple> &obj);
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/single_layer/constant.hpp"
namespace LayerTestsDefinitions {
namespace {
template <size_t N>
std::vector<std::string> getElements(const std::vector<std::string>& v) {
const auto new_size = std::min(N, v.size());
return {begin(v), std::next(begin(v), new_size)};
}
} // namespace
std::string ConstantLayerTest::getTestCaseName(
const testing::TestParamInfo<constantParamsTuple>& obj) {
std::vector<size_t> data_shape;
InferenceEngine::Precision data_precision;
std::vector<std::string> data_elements;
std::string targetName;
std::tie(data_shape, data_precision, data_elements, targetName) = obj.param;
std::ostringstream result;
result << "S=" << CommonTestUtils::vec2str(data_shape) << "_";
result << "dataPRC=" << data_precision.name() << "_";
result << "dataValue=" << CommonTestUtils::vec2str(getElements<5>(data_elements)) << "_";
return result.str();
}
void ConstantLayerTest::SetUp() {
std::vector<size_t> data_shape;
InferenceEngine::Precision data_precision;
std::vector<std::string> data_elements;
std::tie(data_shape, data_precision, data_elements, targetDevice) = this->GetParam();
const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_precision);
auto constant = ngraph::op::Constant::create(precision, data_shape, data_elements);
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(constant)};
function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{}, "constant");
}
} // namespace LayerTestsDefinitions
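
To make the resulting graph concrete (a standalone sketch, not part of the commit): SetUp builds nothing more than a single Constant feeding a Result. The example below hard-codes one of the new I4 parameter sets; the direct use of ngraph::element::i4 stands in for the convertIE2nGraphPrc call shown above.

// Minimal sketch of the function the test constructs for an I4 case.
#include <memory>
#include <string>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"

std::shared_ptr<ngraph::Function> make_constant_only_function() {
    const ngraph::Shape data_shape{2, 2, 3};  // 12 elements, as in the test data
    const std::vector<std::string> data_elements{"1", "-2", "3", "-4", "5", "-6",
                                                 "7", "-1", "2", "-3", "4", "-5"};
    auto constant = ngraph::op::Constant::create(ngraph::element::i4, data_shape, data_elements);
    auto result = std::make_shared<ngraph::opset1::Result>(constant);
    return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                              ngraph::ParameterVector{}, "constant");
}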

View File

@@ -27,6 +27,10 @@ inline ::ngraph::element::Type convertIE2nGraphPrc(const InferenceEngine::Precis
return ::ngraph::element::Type(::ngraph::element::Type_t::f16);
case InferenceEngine::Precision::BF16:
return ::ngraph::element::Type(::ngraph::element::Type_t::bf16);
case InferenceEngine::Precision::U4:
return ::ngraph::element::Type(::ngraph::element::Type_t::u4);
case InferenceEngine::Precision::I4:
return ::ngraph::element::Type(::ngraph::element::Type_t::i4);
case InferenceEngine::Precision::U8:
return ::ngraph::element::Type(::ngraph::element::Type_t::u8);
case InferenceEngine::Precision::I8:

View File

@@ -136,7 +136,7 @@ std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr
for (size_t resultIndex = 0; resultIndex < results.size(); resultIndex++) {
auto& output = outputs[resultIndex];
const auto& outputTensor = outputTensors[resultIndex];
output.resize(ceil(shape_size(outputTensor->get_shape()) * outputTensor->get_element_type().bitwidth() / 8.f));
outputTensors[resultIndex]->read(output.data(), output.size());
if (!convertType.empty() && convertType[resultIndex] != element::Type_t::undefined &&
outputTensor->get_element_type() != element::Type(convertType[resultIndex]))
@@ -300,19 +300,168 @@ bool is_tensor_iterator_exist(const std::shared_ptr<ngraph::Function> & func) {
}
namespace {
template <int Bitwidth, typename Value, typename In,
typename std::enable_if<std::is_unsigned<Value>::value, bool>::type = true>
Value fix_sign(In v) {
return v;
}
template <int Bitwidth, typename Value, typename In,
typename std::enable_if<std::is_signed<Value>::value, bool>::type = true>
Value fix_sign(In v) {
constexpr unsigned sign_bit = 1u << (Bitwidth -1);
const bool is_negative_number = v & sign_bit;
return is_negative_number ? v | 0xFFF0 : v;
}
template<int Bitwidth, typename Value>
class LowPrecisionWrapper {
public:
static constexpr int bitwidth = Bitwidth;
static constexpr uint8_t value_mask = (1u << bitwidth) - 1u;
static constexpr int elements_in_byte = 8 / bitwidth;
LowPrecisionWrapper(uint8_t* data, int position): data(data), position(position) {}
operator Value() const {
return fix_sign<Bitwidth, Value>(((*data) >> (position * bitwidth)) & value_mask);
}
LowPrecisionWrapper& operator=(Value v) {
uint8_t masked_value = v & value_mask;
*data &= ~(value_mask << (position * bitwidth));
*data |= masked_value << (position * bitwidth);
return *this;
}
private:
int position{elements_in_byte - 1};
uint8_t* data;
};
template<int Bitwidth, typename Value>
class LowPrecisionWrapperToConst {
public:
static constexpr int bitwidth = Bitwidth;
static constexpr uint8_t value_mask = (1u << bitwidth) - 1u;
static constexpr int elements_in_byte = 8 / bitwidth;
LowPrecisionWrapperToConst(const uint8_t* data, int position): data(data), position(position) {}
operator Value() const {
return fix_sign<Bitwidth, Value>(((*data) >> (position * bitwidth)) & value_mask);
}
private:
int position{elements_in_byte - 1};
const uint8_t* data;
};
template<int Bitwidth, typename Value>
class LowPrecistionRange {
public:
static constexpr int bitwidth = Bitwidth;
static constexpr int elements_in_byte = 8 / bitwidth;
LowPrecistionRange(uint8_t* data): data(data) {}
LowPrecisionWrapper<Bitwidth, Value> operator[](size_t index) const {
const ptrdiff_t byte_offset = index / elements_in_byte;
const int bit_position = elements_in_byte - 1 - (index % elements_in_byte);
return {data + byte_offset, bit_position};
}
uint8_t* data;
};
template<int Bitwidth, typename Value>
class LowPrecistionConstRange {
public:
static constexpr int bitwidth = Bitwidth;
static constexpr int elements_in_byte = 8 / bitwidth;
LowPrecistionConstRange(const uint8_t* data) : data(data) {}
LowPrecisionWrapperToConst<Bitwidth, Value> operator[](size_t index) const {
const ptrdiff_t byte_offset = index / elements_in_byte;
const int bit_position = elements_in_byte - 1 - (index % elements_in_byte);
return {data + byte_offset, bit_position};
}
const uint8_t* data;
};
template <element::Type_t FromType, typename std::enable_if<FromType != element::Type_t::u1 &&
FromType != element::Type_t::u4 &&
FromType != element::Type_t::i4,
bool>::type = true>
const fundamental_type_for<FromType>* cast_to(const uint8_t* data) {
return reinterpret_cast<const fundamental_type_for<FromType>*>(data);
}
template <element::Type_t FromType, typename std::enable_if<FromType != element::Type_t::u1 &&
FromType != element::Type_t::u4 &&
FromType != element::Type_t::i4,
bool>::type = true>
fundamental_type_for<FromType>* cast_to(uint8_t* data) {
return reinterpret_cast<fundamental_type_for<FromType>*>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::u1, bool>::type = true>
LowPrecistionConstRange<1, uint8_t> cast_to(const uint8_t* data) {
return LowPrecistionConstRange<1, uint8_t>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::u1, bool>::type = true>
LowPrecistionRange<1, uint8_t> cast_to(uint8_t* data) {
return LowPrecistionRange<1, uint8_t>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::u4, bool>::type = true>
LowPrecistionConstRange<4, uint8_t> cast_to(const uint8_t* data) {
return LowPrecistionConstRange<4, uint8_t>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::u4, bool>::type = true>
LowPrecistionRange<4, uint8_t> cast_to(uint8_t* data) {
return LowPrecistionRange<4, uint8_t>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::i4, bool>::type = true>
LowPrecistionConstRange<4, int8_t> cast_to(const uint8_t* data) {
return LowPrecistionConstRange<4, int8_t>(data);
}
template <element::Type_t FromType,
typename std::enable_if<FromType == element::Type_t::i4, bool>::type = true>
LowPrecistionRange<4, int8_t> cast_to(uint8_t* data) {
return LowPrecistionRange<4, int8_t>(data);
}
template <element::Type_t FromType, element::Type_t ToType>
std::vector<std::uint8_t> convertPrecision(const std::vector<std::uint8_t> &buffer, const size_t elementsCount) {
using fromPrec = fundamental_type_for<FromType>;
using toPrec = fundamental_type_for<ToType>;
const size_t min_buffer_size = [&] {
element::Type from_type(FromType);
if (from_type.bitwidth() >= 8) {
return elementsCount * sizeof(fromPrec);
}
return from_type.bitwidth() * elementsCount / 8;
}();
NGRAPH_CHECK(buffer.size() >= min_buffer_size, "avoid buffer overflow");
constexpr auto elementSize = sizeof(toPrec);
std::vector<std::uint8_t> convertedData(elementsCount * elementSize);
auto src = cast_to<FromType>(buffer.data());
auto dst = cast_to<ToType>(convertedData.data());
for (size_t i = 0; i < elementsCount; i++) {
dst[i] = static_cast<toPrec>(src[i]);
}
@@ -337,6 +486,9 @@ std::vector<std::uint8_t> convertPrecisionFrom(const std::vector<std::uint8_t> &
case element::Type_t::f64: {
return convertPrecision<FromType, element::Type_t::f64>(output, elementsCount);
}
case element::Type_t::i4: {
return convertPrecision<FromType, element::Type_t::i4>(output, elementsCount);
}
case element::Type_t::i8: {
return convertPrecision<FromType, element::Type_t::i8>(output, elementsCount);
}
@@ -349,6 +501,12 @@ std::vector<std::uint8_t> convertPrecisionFrom(const std::vector<std::uint8_t> &
case element::Type_t::i64: {
return convertPrecision<FromType, element::Type_t::i64>(output, elementsCount);
}
case element::Type_t::u1: {
return convertPrecision<FromType, element::Type_t::u1>(output, elementsCount);
}
case element::Type_t::u4: {
return convertPrecision<FromType, element::Type_t::u4>(output, elementsCount);
}
case element::Type_t::u8: {
return convertPrecision<FromType, element::Type_t::u8>(output, elementsCount);
}
@@ -389,6 +547,9 @@ std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>
case element::Type_t::f64: {
return convertPrecisionFrom<element::Type_t::f64>(output, toPrecision, elementsCount);
}
case element::Type_t::i4: {
return convertPrecisionFrom<element::Type_t::i4>(output, toPrecision, elementsCount);
}
case element::Type_t::i8: {
return convertPrecisionFrom<element::Type_t::i8>(output, toPrecision, elementsCount);
}
@@ -401,6 +562,12 @@ std::vector<std::uint8_t> convertOutputPrecision(const std::vector<std::uint8_t>
case element::Type_t::i64: {
return convertPrecisionFrom<element::Type_t::i64>(output, toPrecision, elementsCount);
}
case element::Type_t::u1: {
return convertPrecisionFrom<element::Type_t::u1>(output, toPrecision, elementsCount);
}
case element::Type_t::u4: {
return convertPrecisionFrom<element::Type_t::u4>(output, toPrecision, elementsCount);
}
case element::Type_t::u8: {
return convertPrecisionFrom<element::Type_t::u8>(output, toPrecision, elementsCount);
}
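
For context (illustrative, not part of the diff): the wrappers above pack two 4-bit elements per byte, with element 0 in the high nibble (bit_position = elements_in_byte - 1 - index % elements_in_byte) and negative values recovered by filling the upper bits, as fix_sign does. A self-contained sketch of that packing, using hypothetical helpers read_i4/write_i4:

// Standalone sketch (not the commit's code): how two signed 4-bit values share
// one byte, matching the ordering and sign extension of the wrappers above.
#include <cassert>
#include <cstddef>
#include <cstdint>

int8_t read_i4(const uint8_t* data, std::size_t index) {
    const uint8_t byte = data[index / 2];
    const int shift = (index % 2 == 0) ? 4 : 0;     // element 0 -> high nibble
    const uint8_t nibble = (byte >> shift) & 0x0F;
    // Sign-extend: if bit 3 is set the value is negative, so fill the upper bits.
    return (nibble & 0x08) ? static_cast<int8_t>(nibble | 0xF0)
                           : static_cast<int8_t>(nibble);
}

void write_i4(uint8_t* data, std::size_t index, int8_t value) {
    const int shift = (index % 2 == 0) ? 4 : 0;
    uint8_t& byte = data[index / 2];
    byte = (byte & ~(0x0F << shift)) | ((value & 0x0F) << shift);
}

int main() {
    uint8_t packed[1] = {0};
    write_i4(packed, 0, 3);    // high nibble
    write_i4(packed, 1, -2);   // low nibble: -2 stored as 0xE
    assert(packed[0] == 0x3E);
    assert(read_i4(packed, 0) == 3);
    assert(read_i4(packed, 1) == -2);
    return 0;
}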