Added range roi blob API (#7567)

Anton Pankratv 2021-09-22 10:54:37 +03:00 committed by GitHub
parent e2272331be
commit 5dc773e51b
5 changed files with 202 additions and 2 deletions

View File

@@ -242,6 +242,18 @@ public:
*/
virtual Blob::Ptr createROI(const ROI& roi) const;
/**
* @brief Creates a blob describing the given ROI, based on the current blob, with memory sharing.
*
* Note: the default implementation may throw a "not implemented" exception.
*
* @param begin The ROI start coordinate
* @param end The ROI end coordinate (exclusive)
*
* @return A shared pointer to the newly created ROI blob.
*/
virtual Blob::Ptr createROI(const std::vector<std::size_t>& begin, const std::vector<std::size_t>& end) const;
protected:
/**
* @brief The tensor descriptor of the given blob.
@@ -662,6 +674,10 @@ public:
return Blob::Ptr(new TBlob<T>(*this, roi));
}
Blob::Ptr createROI(const std::vector<std::size_t>& begin, const std::vector<std::size_t>& end) const override {
return Blob::Ptr(new TBlob<T>(*this, begin, end));
}
/**
* @brief Gets BlobIterator for the data.
*
@@ -789,6 +805,20 @@ protected:
_handle = origBlob._handle;
}
/**
* @brief Creates a blob from an existing blob with a given ROI (begin/end coordinates)
* @param origBlob An original blob
* @param begin ROI start coordinate
* @param end ROI end coordinate
*/
TBlob(const TBlob& origBlob, const std::vector<size_t>& begin, const std::vector<size_t>& end)
: MemoryBlob(make_roi_desc(origBlob.getTensorDesc(), begin, end, true)),
_allocator(origBlob._allocator) {
IE_ASSERT(origBlob._handle != nullptr) << "Original Blob must be allocated before ROI creation";
_handle = origBlob._handle;
}
};
#ifdef __clang__
@@ -892,4 +922,15 @@ std::shared_ptr<T> make_shared_blob(Args&&... args) {
*/
INFERENCE_ENGINE_API_CPP(Blob::Ptr) make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi);
/**
* @brief Creates a blob describing a given ROI, based on the given blob with pre-allocated memory.
*
* @param inputBlob An original blob with pre-allocated memory.
* @param begin The ROI start coordinate inside the original blob.
* @param end The ROI end coordinate (exclusive) inside the original blob.
* @return A shared pointer to the newly created blob.
*/
INFERENCE_ENGINE_API_CPP(Blob::Ptr)
make_shared_blob(const Blob::Ptr& inputBlob, const std::vector<size_t>& begin, const std::vector<size_t>& end);
} // namespace InferenceEngine
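
For reference, a minimal usage sketch of the new range-based overload (the function name is illustrative; the dims and coordinates mirror the NCHW test further down; end coordinates are exclusive):

#include <ie_blob.h>

using namespace InferenceEngine;

void rangeRoiExample() {
    // A 1x3x6x5 U8 blob in NCHW layout (an RGB image of width 5 and height 6).
    Blob::Ptr blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1, 3, 6, 5}, Layout::NCHW));
    blob->allocate();

    // Crop a 2 x 4 (W x H) region starting at (x, y) = (2, 1) across all channels.
    // begin/end follow the blob's dimension order (N, C, H, W); the ROI blob shares
    // memory with the original blob, only its TensorDesc differs.
    Blob::Ptr roiBlob = make_shared_blob(blob, {0, 0, 1, 2}, {1, 3, 5, 4});
    (void)roiBlob;
}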

View File

@@ -374,4 +374,21 @@ struct ROI {
*/
INFERENCE_ENGINE_API_CPP(TensorDesc) make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc);
/**
* @brief Creates a TensorDesc object for a ROI.
*
* @param origDesc The original TensorDesc object.
* @param begin The start coordinate of the ROI inside the original object.
* @param end The end coordinate (exclusive) of the ROI inside the original object.
* @param useOrigMemDesc Flag to use the original memory description (strides/offset).
* Should be set if the new TensorDesc describes shared memory.
*
* @return A newly created TensorDesc object representing ROI.
*/
INFERENCE_ENGINE_API_CPP(TensorDesc)
make_roi_desc(const TensorDesc& origDesc,
const std::vector<size_t>& begin,
const std::vector<size_t>& end,
bool useOrigMemDesc);
} // namespace InferenceEngine
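
A sketch of what the new overload produces (the function name is illustrative; the numbers match the NCHW test case further down, and useOrigMemDesc = true keeps the parent strides so the descriptor can alias the parent memory):

#include <ie_layouts.h>

using namespace InferenceEngine;

void roiDescExample() {
    const TensorDesc origDesc(Precision::U8, {1, 3, 6, 5}, Layout::NCHW);
    const TensorDesc roiDesc = make_roi_desc(origDesc, {0, 0, 1, 2}, {1, 3, 5, 4}, /*useOrigMemDesc=*/true);
    // roiDesc.getBlockingDesc().getBlockDims()     -> {1, 3, 4, 2}
    // roiDesc.getBlockingDesc().getOffsetPadding() -> 7   (1 * 5 + 2, the parent offset of (y, x) = (1, 2))
    // roiDesc.getBlockingDesc().getStrides()       -> {90, 30, 5, 1}   (the parent blob's strides are kept)
    (void)roiDesc;
}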

View File

@@ -9,13 +9,26 @@
#include "ie_blob.h"
namespace InferenceEngine {
Blob::Ptr Blob::createROI(const ROI& roi) const {
if (getTensorDesc().getLayout() == Layout::NCHW || getTensorDesc().getLayout() == Layout::NHWC) {
return createROI({roi.id, 0, roi.posY, roi.posX},
{roi.id + 1, getTensorDesc().getDims()[1], roi.posY + roi.sizeY, roi.posX + roi.sizeX});
}
IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob";
}
Blob::Ptr Blob::createROI(const std::vector<std::size_t>& begin, const std::vector<std::size_t>& end) const {
IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob or roi";
}
Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi) {
return inputBlob->createROI(roi);
}
Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob,
const std::vector<std::size_t>& begin,
const std::vector<std::size_t>& end) {
return inputBlob->createROI(begin, end);
}
} // namespace InferenceEngine
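
In other words, for NCHW/NHWC blobs the existing 2-D ROI overload is now expressed through the range overload. A small sketch of the equivalence (function name and blob dims are illustrative):

#include <ie_blob.h>

using namespace InferenceEngine;

void equivalentRois(const Blob::Ptr& blob) {  // e.g. a 1x3x6x5 NCHW blob
    const ROI roi = {0, 2, 1, 2, 4};          // id = 0, (x, y) = (2, 1), sizeX (W) = 2, sizeY (H) = 4
    const size_t channels = blob->getTensorDesc().getDims()[1];

    Blob::Ptr a = blob->createROI(roi);
    Blob::Ptr b = blob->createROI({roi.id, 0, roi.posY, roi.posX},
                                  {roi.id + 1, channels, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
    // Both blobs describe the same sub-region and share the parent's memory;
    // only the TensorDesc differs from the parent blob's.
    (void)a;
    (void)b;
}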

View File

@@ -495,3 +495,16 @@ TensorSlice make_roi_slice(const TensorDesc& origDesc, const ROI& roi) {
TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc) {
return make_roi_desc(origDesc, make_roi_slice(origDesc, roi), useOrigMemDesc);
}
TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc,
const std::vector<size_t>& begin,
const std::vector<size_t>& end,
bool useOrigMemDesc) {
IE_ASSERT(begin.size() == end.size());
TensorSlice slice;
for (size_t i = 0; i < begin.size(); ++i) {
IE_ASSERT(end[i] >= begin[i]);
slice.emplace_back(begin[i], end[i] - begin[i]);
}
return make_roi_desc(origDesc, slice, useOrigMemDesc);
}
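
Invalid coordinates are rejected rather than silently clipped; a sketch of the failure modes (the descriptor is illustrative; the error behaviour follows the IE_ASSERT checks above and the wrong-size test below):

#include <ie_layouts.h>

using namespace InferenceEngine;

void invalidRoiExamples() {
    const TensorDesc desc(Precision::U8, {1, 3, 4, 4}, Layout::NCHW);

    // The end coordinate exceeds the 4x4 spatial dims -> throws InferenceEngine::Exception:
    // make_roi_desc(desc, {0, 0, 1, 1}, {1, 3, 5, 5}, true);

    // Mismatched rank or end[i] < begin[i] is rejected by the IE_ASSERT checks above:
    // make_roi_desc(desc, {0, 0, 1}, {1, 3, 5, 5}, true);
    // make_roi_desc(desc, {0, 0, 3, 3}, {1, 3, 1, 1}, true);
    (void)desc;
}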

View File

@@ -461,3 +461,119 @@ TEST_F(BlobTests, readRoiBlob) {
}
}
}
/////////////////////////////////////////
TEST_F(BlobTests, makeRangeRoiBlobNchw) {
// we create main blob with NCHW layout. We will crop ROI from this blob.
InferenceEngine::SizeVector dims = {1, 3, 6, 5}; // RGB picture of size (WxH) = 5x6
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
blob->allocate();
// create ROI blob based on the already created blob
InferenceEngine::ROI roi = {0, 2, 1, 2, 4}; // cropped picture with: id = 0, (x,y) = (2,1), sizeX (W) = 2, sizeY (H) = 4
InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob,
{0, 0, roi.posY, roi.posX},
{1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
// check that BlockingDesc is constructed properly for the ROI blob
InferenceEngine::SizeVector refDims = {1, 3, 4, 2};
InferenceEngine::SizeVector refOrder = {0, 1, 2, 3};
size_t refOffset = 7;
InferenceEngine::SizeVector refStrides = {90, 30, 5, 1};
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
}
TEST_F(BlobTests, makeRangeRoiBlobNhwc) {
// we create main blob with NHWC layout. We will crop ROI from this blob.
InferenceEngine::SizeVector dims = {1, 3, 4, 8}; // RGB picture of size (WxH) = 8x4
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NHWC));
blob->allocate();
// create ROI blob based on the already created blob
InferenceEngine::ROI roi = {0, 3, 2, 5, 2}; // cropped picture with: id = 0, (x,y) = (3,2), sizeX (W) = 5, sizeY (H) = 2
InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob,
{0, 0, roi.posY, roi.posX},
{1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
// check that BlockingDesc is constructed properly for the ROI blob
InferenceEngine::SizeVector refDims = {1, 2, 5, 3};
InferenceEngine::SizeVector refOrder = {0, 2, 3, 1};
size_t refOffset = 57;
InferenceEngine::SizeVector refStrides = {96, 24, 3, 1};
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
}
TEST_F(BlobTests, makeRangeRoiBlobWrongSize) {
// we create main blob with NCHW layout. We will crop ROI from this blob.
InferenceEngine::SizeVector dims = {1, 3, 4, 4}; // RGB picture of size (WxH) = 4x4
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
blob->allocate();
// try to create ROI blob with wrong size
InferenceEngine::ROI roi = {0, 1, 1, 4, 4}; // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
ASSERT_THROW(make_shared_blob(blob,
{0, 0, roi.posY, roi.posX},
{1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}), InferenceEngine::Exception);
}
TEST_F(BlobTests, readRangeRoiBlob) {
// Create original Blob
const auto origDesc =
InferenceEngine::TensorDesc(
InferenceEngine::Precision::I32,
{1, 3, 4, 8},
InferenceEngine::NCHW);
const auto origBlob =
InferenceEngine::make_shared_blob<int32_t>(origDesc);
origBlob->allocate();
// Fill the original Blob
{
auto origMemory = origBlob->wmap();
const auto origPtr = origMemory.as<int32_t*>();
ASSERT_NE(nullptr, origPtr);
for (size_t i = 0; i < origBlob->size(); ++i) {
origPtr[i] = i;
}
}
// Create ROI Blob
const auto roi = InferenceEngine::ROI(0, 4, 2, 4, 2);
const auto roiBlob = InferenceEngine::as<InferenceEngine::MemoryBlob>(origBlob->createROI(
{0, 0, roi.posY, roi.posX},
{1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}));
ASSERT_NE(nullptr, roiBlob);
// Read ROI Blob
{
const auto roiOffset = roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding();
auto roiMemory = roiBlob->rmap();
auto roiPtr = roiMemory.as<const int32_t*>();
ASSERT_NE(nullptr, roiPtr);
// Blob::rmap returns pointer to the original blob start, we have to add ROI offset manually.
roiPtr += roiOffset;
for (size_t i = 0; i < roiBlob->size(); ++i) {
ASSERT_EQ(roiPtr[i], i + roiOffset);
}
}
}
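
For reference, in this test the ROI offset resolved from the BlockingDesc is posY * W + posX = 2 * 8 + 4 = 20, and roiBlob->size() is 1 * 3 * 2 * 4 = 24, so the loop compares roiPtr[0..23] against the values 20..43 that were written into the parent blob.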