diff --git a/inference-engine/src/inference_engine/include/ie/ie_blob.h b/inference-engine/src/inference_engine/include/ie/ie_blob.h
index 21342ae73da..aa625e6f33c 100644
--- a/inference-engine/src/inference_engine/include/ie/ie_blob.h
+++ b/inference-engine/src/inference_engine/include/ie/ie_blob.h
@@ -242,6 +242,18 @@ public:
      */
     virtual Blob::Ptr createROI(const ROI& roi) const;
+    /**
+     * @brief Creates a blob describing given ROI object based on the current blob with memory sharing.
+     *
+     * Note: default implementation may throw "not implemented" exception.
+     *
+     * @param begin A ROI start coordinate
+     * @param end A ROI end coordinate
+     *
+     * @return A shared pointer to the newly created ROI blob.
+     */
+    virtual Blob::Ptr createROI(const std::vector<size_t>& begin, const std::vector<size_t>& end) const;
+
 protected:
     /**
      * @brief The tensor descriptor of the given blob.
      */
@@ -662,6 +674,10 @@ public:
         return Blob::Ptr(new TBlob(*this, roi));
     }
 
+    Blob::Ptr createROI(const std::vector<size_t>& begin, const std::vector<size_t>& end) const override {
+        return Blob::Ptr(new TBlob(*this, begin, end));
+    }
+
     /**
      * @brief Gets BlobIterator for the data.
      *
@@ -789,6 +805,20 @@ protected:
 
         _handle = origBlob._handle;
     }
+
+    /**
+     * @brief Creates a blob from the existing blob with a given ROI
+     * @param origBlob An original blob
+     * @param begin ROI start coordinate
+     * @param end ROI end coordinate
+     */
+    TBlob(const TBlob& origBlob, const std::vector<size_t>& begin, const std::vector<size_t>& end)
+        : MemoryBlob(make_roi_desc(origBlob.getTensorDesc(), begin, end, true)),
+          _allocator(origBlob._allocator) {
+        IE_ASSERT(origBlob._handle != nullptr) << "Original Blob must be allocated before ROI creation";
+
+        _handle = origBlob._handle;
+    }
 };
 
 #ifdef __clang__
@@ -892,4 +922,15 @@ std::shared_ptr<TBlob<Type>> make_shared_blob(Args&&... args) {
  */
 INFERENCE_ENGINE_API_CPP(Blob::Ptr) make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi);
 
+/**
+ * @brief Creates a blob describing given ROI object based on the given blob with pre-allocated memory.
+ *
+ * @param inputBlob original blob with pre-allocated memory.
+ * @param begin A ROI object start coordinate inside of the original blob.
+ * @param end A ROI object end coordinate inside of the original blob.
+ * @return A shared pointer to the newly created blob.
+ */
+INFERENCE_ENGINE_API_CPP(Blob::Ptr)
+make_shared_blob(const Blob::Ptr& inputBlob, const std::vector<size_t>& begin, const std::vector<size_t>& end);
+
 }  // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/include/ie/ie_layouts.h b/inference-engine/src/inference_engine/include/ie/ie_layouts.h
index f1f8ef382ce..76aa5d62ae9 100644
--- a/inference-engine/src/inference_engine/include/ie/ie_layouts.h
+++ b/inference-engine/src/inference_engine/include/ie/ie_layouts.h
@@ -374,4 +374,21 @@ struct ROI {
  */
 INFERENCE_ENGINE_API_CPP(TensorDesc) make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc);
 
+/**
+ * @brief Creates a TensorDesc object for ROI.
+ *
+ * @param origDesc original TensorDesc object.
+ * @param begin start coordinate of ROI object inside of the original object.
+ * @param end end coordinate of ROI object inside of the original object.
+ * @param useOrigMemDesc Flag to use original memory description (strides/offset).
+ *        Should be set if the new TensorDesc describes shared memory.
+ *
+ * @return A newly created TensorDesc object representing ROI.
+ */
+INFERENCE_ENGINE_API_CPP(TensorDesc)
+make_roi_desc(const TensorDesc& origDesc,
+              const std::vector<size_t>& begin,
+              const std::vector<size_t>& end,
+              bool useOrigMemDesc);
+
 }  // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/src/ie_blob_common.cpp b/inference-engine/src/inference_engine/src/ie_blob_common.cpp
index 444d8e3d232..fc5d9b9144f 100644
--- a/inference-engine/src/inference_engine/src/ie_blob_common.cpp
+++ b/inference-engine/src/inference_engine/src/ie_blob_common.cpp
@@ -9,13 +9,26 @@
 #include "ie_blob.h"
 
 namespace InferenceEngine {
-
-Blob::Ptr Blob::createROI(const ROI&) const {
+Blob::Ptr Blob::createROI(const ROI& roi) const {
+    if (getTensorDesc().getLayout() == Layout::NCHW || getTensorDesc().getLayout() == Layout::NHWC) {
+        return createROI({roi.id, 0, roi.posY, roi.posX},
+                         {roi.id + 1, getTensorDesc().getDims()[1], roi.posY + roi.sizeY, roi.posX + roi.sizeX});
+    }
     IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob";
 }
 
+Blob::Ptr Blob::createROI(const std::vector<size_t>& begin, const std::vector<size_t>& end) const {
+    IE_THROW(NotImplemented) << "createROI is not implemented for current type of Blob or roi";
+}
+
 Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi) {
     return inputBlob->createROI(roi);
 }
 
+Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob,
+                           const std::vector<size_t>& begin,
+                           const std::vector<size_t>& end) {
+    return inputBlob->createROI(begin, end);
+}
+
 }  // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/src/ie_layouts.cpp b/inference-engine/src/inference_engine/src/ie_layouts.cpp
index 9cb98c67152..f9f906cc34f 100644
--- a/inference-engine/src/inference_engine/src/ie_layouts.cpp
+++ b/inference-engine/src/inference_engine/src/ie_layouts.cpp
@@ -495,3 +495,16 @@ TensorSlice make_roi_slice(const TensorDesc& origDesc, const ROI& roi) {
 TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc, const ROI& roi, bool useOrigMemDesc) {
     return make_roi_desc(origDesc, make_roi_slice(origDesc, roi), useOrigMemDesc);
 }
+
+TensorDesc InferenceEngine::make_roi_desc(const TensorDesc& origDesc,
+                                          const std::vector<size_t>& begin,
+                                          const std::vector<size_t>& end,
+                                          bool useOrigMemDesc) {
+    IE_ASSERT(begin.size() == end.size());
+    TensorSlice slice;
+    for (size_t i = 0; i < begin.size(); ++i) {
+        IE_ASSERT(end[i] >= begin[i]);
+        slice.emplace_back(begin[i], end[i] - begin[i]);
+    }
+    return make_roi_desc(origDesc, slice, useOrigMemDesc);
+}
diff --git a/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp b/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp
index d84f185727b..0045080c4d2 100644
--- a/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp
+++ b/inference-engine/tests/unit/inference_engine/ie_blob_test.cpp
@@ -461,3 +461,119 @@ TEST_F(BlobTests, readRoiBlob) {
         }
     }
 }
+
+/////////////////////////////////////////
+
+TEST_F(BlobTests, makeRangeRoiBlobNchw) {
+    // we create main blob with NCHW layout. We will crop ROI from this blob.
+    InferenceEngine::SizeVector dims = {1, 3, 6, 5};  // RGB picture of size (WxH) = 5x6
+    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
+        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
+    blob->allocate();
+
+    // create ROI blob based on the already created blob
+    InferenceEngine::ROI roi = {0, 2, 1, 2, 4};  // cropped picture with: id = 0, (x,y) = (2,1), sizeX (W) = 2, sizeY (H) = 4
+    InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob,
+                                                          {0, 0, roi.posY, roi.posX},
+                                                          {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
+
+    // check that BlockingDesc is constructed properly for the ROI blob
+    InferenceEngine::SizeVector refDims = {1, 3, 4, 2};
+    InferenceEngine::SizeVector refOrder = {0, 1, 2, 3};
+    size_t refOffset = 7;
+    InferenceEngine::SizeVector refStrides = {90, 30, 5, 1};
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
+}
+
+TEST_F(BlobTests, makeRangeRoiBlobNhwc) {
+    // we create main blob with NHWC layout. We will crop ROI from this blob.
+    InferenceEngine::SizeVector dims = {1, 3, 4, 8};  // RGB picture of size (WxH) = 8x4
+    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
+        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NHWC));
+    blob->allocate();
+
+    // create ROI blob based on the already created blob
+    InferenceEngine::ROI roi = {0, 3, 2, 5, 2};  // cropped picture with: id = 0, (x,y) = (3,2), sizeX (W) = 5, sizeY (H) = 2
+    InferenceEngine::Blob::Ptr roiBlob = make_shared_blob(blob,
+                                                          {0, 0, roi.posY, roi.posX},
+                                                          {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX});
+
+    // check that BlockingDesc is constructed properly for the ROI blob
+    InferenceEngine::SizeVector refDims = {1, 2, 5, 3};
+    InferenceEngine::SizeVector refOrder = {0, 2, 3, 1};
+    size_t refOffset = 57;
+    InferenceEngine::SizeVector refStrides = {96, 24, 3, 1};
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getBlockDims(), refDims);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOrder(), refOrder);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding(), refOffset);
+    ASSERT_EQ(roiBlob->getTensorDesc().getBlockingDesc().getStrides(), refStrides);
+}
+
+TEST_F(BlobTests, makeRangeRoiBlobWrongSize) {
+    // we create main blob with NCHW layout. We will crop ROI from this blob.
+    InferenceEngine::SizeVector dims = {1, 3, 4, 4};  // RGB picture of size (WxH) = 4x4
+    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<uint8_t>(
+        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, dims, InferenceEngine::NCHW));
+    blob->allocate();
+
+    // try to create ROI blob with wrong size
+    InferenceEngine::ROI roi = {0, 1, 1, 4, 4};  // cropped picture with: id = 0, (x,y) = (1,1), sizeX (W) = 4, sizeY (H) = 4
+    ASSERT_THROW(make_shared_blob(blob,
+                                  {0, 0, roi.posY, roi.posX},
+                                  {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}), InferenceEngine::Exception);
+}
+
+TEST_F(BlobTests, readRangeRoiBlob) {
+    // Create original Blob
+
+    const auto origDesc =
+        InferenceEngine::TensorDesc(
+            InferenceEngine::Precision::I32,
+            {1, 3, 4, 8},
+            InferenceEngine::NCHW);
+
+    const auto origBlob =
+        InferenceEngine::make_shared_blob<int32_t>(origDesc);
+    origBlob->allocate();
+
+    // Fill the original Blob
+
+    {
+        auto origMemory = origBlob->wmap();
+        const auto origPtr = origMemory.as<int32_t*>();
+        ASSERT_NE(nullptr, origPtr);
+
+        for (size_t i = 0; i < origBlob->size(); ++i) {
+            origPtr[i] = i;
+        }
+    }
+
+    // Create ROI Blob
+
+    const auto roi = InferenceEngine::ROI(0, 4, 2, 4, 2);
+
+    const auto roiBlob = InferenceEngine::as<InferenceEngine::MemoryBlob>(origBlob->createROI(
+        {0, 0, roi.posY, roi.posX},
+        {1, 3, roi.posY + roi.sizeY, roi.posX + roi.sizeX}));
+    ASSERT_NE(nullptr, roiBlob);
+
+    // Read ROI Blob
+
+    {
+        const auto roiOffset = roiBlob->getTensorDesc().getBlockingDesc().getOffsetPadding();
+
+        auto roiMemory = roiBlob->rmap();
+        auto roiPtr = roiMemory.as<const int32_t*>();
+        ASSERT_NE(nullptr, roiPtr);
+
+        // Blob::rmap returns pointer to the original blob start, we have to add ROI offset manually.
+        roiPtr += roiOffset;
+
+        for (size_t i = 0; i < roiBlob->size(); ++i) {
+            ASSERT_EQ(roiPtr[i], i + roiOffset);
+        }
+    }
+}