Made ov::runtime::Tensor strides to be in bytes (#8078)

This commit is contained in:
Ilya Lavrenov 2021-10-21 22:35:58 +03:00 committed by GitHub
parent 0fb24e8040
commit dfefb92164
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 65 additions and 36 deletions

View File

@ -79,8 +79,8 @@ public:
* @param type Tensor element type
* @param shape Tensor shape
* @param host_ptr Pointer to pre-allocated host memory
* @param strides Optional strides parameters in elements. Strides are supposed to be equal to shape if they are not
* set
* @param strides Optional strides parameter in bytes. If not set, strides are computed automatically
* based on the shape and the element size
*/
Tensor(const element::Type type, const Shape& shape, void* host_ptr, const Strides& strides = {});
@ -124,7 +124,7 @@ public:
size_t get_byte_size() const;
/**
* @return Tensor's strides in elements
* @return Tensor's strides in bytes
*/
Strides get_strides() const;

View File

@ -40,15 +40,26 @@ Tensor::Tensor(const element::Type element_type, const Shape& shape, const Alloc
_impl->allocate();
}
Tensor::Tensor(const element::Type element_type, const Shape& shape, void* host_ptr, const Strides& strides) {
Tensor::Tensor(const element::Type element_type, const Shape& shape, void* host_ptr, const Strides& byte_strides) {
ie::SizeVector blk_order(shape.size());
std::iota(blk_order.begin(), blk_order.end(), 0);
ie::SizeVector dim_offset(shape.size(), 0);
ie::SizeVector blk_strides;
if (strides.empty()) {
if (byte_strides.empty()) {
blk_strides = ov::row_major_strides(shape);
} else {
blk_strides.assign(strides.begin(), strides.end());
blk_strides.resize(byte_strides.size());
std::transform(byte_strides.begin(),
byte_strides.end(),
blk_strides.begin(),
[&element_type](size_t byte_stride) {
OPENVINO_ASSERT(byte_stride % element_type.size() == 0,
"Limitation: Stride in bytes ",
byte_stride,
" should be divisible by size of element ",
element_type.size());
return byte_stride / element_type.size();
});
}
try {
@ -93,7 +104,19 @@ Strides Tensor::get_strides() const {
OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
"Could not get strides for types with bitwidths less then 8 bit. Tensor type: ",
get_element_type());
OV_TENSOR_STATEMENT(return _impl->getTensorDesc().getBlockingDesc().getStrides());
OV_TENSOR_STATEMENT({
const auto& element_strides = _impl->getTensorDesc().getBlockingDesc().getStrides();
const size_t elem_size = get_element_type().size();
Strides byte_strides;
byte_strides.resize(element_strides.size());
std::transform(element_strides.begin(),
element_strides.end(),
byte_strides.begin(),
[&elem_size](size_t stride) {
return stride * elem_size;
});
return byte_strides;
});
}
size_t Tensor::get_size() const {
@ -120,6 +143,7 @@ void* Tensor::data(const element::Type element_type) const {
", is not representable as pointer to ",
element_type);
}
// since we don't use byte offsets, we need to explicitly multiply by element_size
auto byte_offset = _impl->getTensorDesc().getBlockingDesc().getOffsetPadding() * get_element_type().size();
OPENVINO_ASSERT((get_element_type().bitwidth() >= 8) || (byte_offset == 0),
"ROI access for types with bitwidths less then 8 bit is not implemented. Tensor type: ",

View File

@ -18,6 +18,13 @@
using OVTensorTest = ::testing::Test;
// Converts element-wise strides to byte strides by scaling each entry
// with the size (in bytes) of one element of the given type.
inline ov::Strides byteStrides(const ov::Strides& strides, const ov::element::Type& type) {
    const size_t elem_size = type.size();
    ov::Strides result;
    result.reserve(strides.size());
    for (const auto elem_stride : strides)
        result.push_back(elem_stride * elem_size);
    return result;
}
TEST_F(OVTensorTest, canCreateTensor) {
ov::Shape shape = {4, 3, 2};
ov::runtime::Tensor t{ov::element::f32, shape};
@ -27,7 +34,7 @@ TEST_F(OVTensorTest, canCreateTensor) {
ASSERT_EQ(ov::element::f32, t.get_element_type());
ASSERT_EQ(shape, t.get_shape());
ASSERT_NE(shape, t.get_strides());
ASSERT_EQ(ov::Strides({6, 2, 1}), t.get_strides());
ASSERT_EQ(byteStrides(ov::Strides({6, 2, 1}), t.get_element_type()), t.get_strides());
ASSERT_EQ(ov::element::f32.size() * totalSize, t.get_byte_size());
ASSERT_THROW(t.data(ov::element::i64), ov::Exception);
ASSERT_THROW(t.data<std::int32_t>(), ov::Exception);
@ -72,7 +79,7 @@ TEST_F(OVTensorTest, canAccessExternalData) {
ASSERT_EQ(data, t.data(ov::element::f32));
ASSERT_EQ(data, ptr);
ASSERT_THROW(t.data<std::int16_t>(), ov::Exception);
ASSERT_EQ(ov::row_major_strides(shape), t.get_strides());
ASSERT_EQ(byteStrides(ov::row_major_strides(shape), t.get_element_type()), t.get_strides());
ASSERT_EQ(ov::shape_size(shape), t.get_size());
ASSERT_EQ(ov::shape_size(shape) * ov::element::f32.size(), t.get_byte_size());
}
@ -81,11 +88,11 @@ TEST_F(OVTensorTest, canAccessExternalData) {
TEST_F(OVTensorTest, canAccessExternalDataWithStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
ov::runtime::Tensor t{ov::element::f32, shape, data, {4, 1}};
ASSERT_EQ(ov::Strides({4, 1}), t.get_strides());
ov::runtime::Tensor t{ov::element::f32, shape, data, {16, 4}};
ASSERT_EQ(ov::Strides({16, 4}), t.get_strides());
{
ASSERT_EQ((ov::Shape{2, 3}), t.get_shape());
float* ptr = t.data<float>();
const float* ptr = t.data<const float>();
ASSERT_EQ(ptr[5], 42);
}
}
@ -98,16 +105,23 @@ TEST_F(OVTensorTest, cannotCreateTensorWithExternalNullptr) {
TEST_F(OVTensorTest, cannotCreateTensorWithWrongStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
const auto el = ov::element::f32;
{
// strides.size() != shape.size()
EXPECT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, {6, 3, 1}), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, byteStrides({6, 3, 1}, el)), ov::Exception);
}
{
// strides values are element-wise >= ov::row_major_strides(shape) values
EXPECT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, {2, 1}), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, {3, 0}), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, {3, 2}), ov::Exception);
EXPECT_NO_THROW(ov::runtime::Tensor(ov::element::f32, shape, data, {6, 2}));
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, byteStrides({2, 1}, el)), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, byteStrides({3, 0}, el)), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, byteStrides({3, 2}, el)), ov::Exception);
EXPECT_NO_THROW(ov::runtime::Tensor(el, shape, data, byteStrides({6, 2}, el)));
}
{
// strides are not divisible by elem_size
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, {7, el.size()}), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, {3, 0}), ov::Exception);
EXPECT_THROW(ov::runtime::Tensor(el, shape, data, {el.size(), 3}), ov::Exception);
}
}
@ -119,7 +133,7 @@ TEST_F(OVTensorTest, saveDimsAndSizeAfterMove) {
ASSERT_EQ(shape, new_tensor.get_shape());
ASSERT_EQ(ov::element::f32, new_tensor.get_element_type());
ASSERT_EQ(ov::row_major_strides(shape), new_tensor.get_strides());
ASSERT_EQ(byteStrides(ov::row_major_strides(shape), new_tensor.get_element_type()), new_tensor.get_strides());
ASSERT_THROW(t.get_size(), ov::Exception);
ASSERT_THROW(t.get_element_type(), ov::Exception);
@ -141,7 +155,7 @@ TEST_F(OVTensorTest, canSetShape) {
ASSERT_EQ(t.get_shape(), origShape);
ASSERT_NO_THROW(t.set_shape({4, 5, 6}));
ASSERT_EQ(newShape, t.get_shape());
ASSERT_EQ(ov::row_major_strides(newShape), t.get_strides());
ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides());
ASSERT_NE(orig_data, t.data());
// check that setShape for copy changes original Tensor
@ -180,7 +194,7 @@ TEST_F(OVTensorTest, makeRangeRoiTensor) {
ASSERT_EQ(roi_tensor.data<int32_t>() - t.data<int32_t>(), ref_offset_elems);
ASSERT_EQ(reinterpret_cast<uint8_t*>(roi_tensor.data()) - reinterpret_cast<uint8_t*>(t.data()), ref_offset_bytes);
ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
ASSERT_EQ(ref_strides, roi_tensor.get_strides());
ASSERT_EQ(byteStrides(ref_strides, roi_tensor.get_element_type()), roi_tensor.get_strides());
ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
}
@ -218,14 +232,15 @@ TEST_F(OVTensorTest, readRangeRoiBlob) {
ov::runtime::Tensor roi_tensor{t, {0, 0, 2, 4}, {1, 3, 4, 8}};
ASSERT_NE(false, static_cast<bool>(roi_tensor));
{
auto roi = roi_tensor.data<int32_t>();
const std::uint8_t* roi = reinterpret_cast<const std::uint8_t*>(roi_tensor.data());
ASSERT_NE(nullptr, roi);
auto strides = roi_tensor.get_strides();
for (auto&& c : ngraph::CoordinateTransformBasic{roi_tensor.get_shape()}) {
auto actual = roi[c[3] * strides[3] + c[2] * strides[2] + c[1] * strides[1] + c[0] * strides[0]];
auto expected = t.data<int32_t>()[(c[3] + 4) * strides[3] + (c[2] + 2) * strides[2] +
(c[1] + 0) * strides[1] + (c[0] + 0) * strides[0]];
ASSERT_EQ(expected, actual) << c;
auto actual_addr = roi + c[3] * strides[3] + c[2] * strides[2] + c[1] * strides[1] + c[0] * strides[0];
auto expected_addr = t.data<int32_t>() + ((c[3] + 4) * strides[3] + (c[2] + 2) * strides[2] +
(c[1] + 0) * strides[1] + (c[0] + 0) * strides[0]) /
t.get_element_type().size();
ASSERT_EQ(actual_addr, reinterpret_cast<const std::uint8_t*>(expected_addr));
}
}
}

View File

@ -73,14 +73,6 @@ const std::map<py::str, ov::element::Type> dtype_to_ov_type = {
{"bool", ov::element::boolean},
};
// Translates OpenVINO element-wise strides into numpy-style byte strides:
// each stride is multiplied by the byte size of a single element of ov_type.
ov::Strides to_numpy_strides(const ov::Strides& strides, const ov::element::Type& ov_type) {
    const size_t element_size = ov_type.size();
    ov::Strides numpy_strides(strides.size());
    for (size_t i = 0; i < strides.size(); ++i) {
        numpy_strides[i] = strides[i] * element_size;
    }
    return numpy_strides;
}
InferenceEngine::Layout get_layout_from_string(const std::string& layout) {
return layout_str_to_enum.at(layout);
}

View File

@ -36,8 +36,6 @@ namespace Common
extern const std::map<ov::element::Type, py::dtype> ov_type_to_dtype;
extern const std::map<py::str, ov::element::Type> dtype_to_ov_type;
ov::Strides to_numpy_strides(const ov::Strides& strides, const ov::element::Type& ov_type);
InferenceEngine::Layout get_layout_from_string(const std::string& layout);
const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout);

View File

@ -71,7 +71,7 @@ void regclass_Tensor(py::module m) {
cls.def_property_readonly("data", [](ov::runtime::Tensor& self) {
return py::array(Common::ov_type_to_dtype.at(self.get_element_type()),
self.get_shape(),
Common::to_numpy_strides(self.get_strides(), self.get_element_type()),
self.get_strides(),
self.data(),
py::cast(self));
});