From d225ba6e5369b6697a99e8b855b3aeb3941b5859 Mon Sep 17 00:00:00 2001
From: Rafik Saliev
Date: Tue, 3 Nov 2020 19:19:26 +0100
Subject: [PATCH] [IE] Add batched blob support (#2203)

* [IE] Add batched blob support

A new `class BatchedBlob : public CompoundBlob` is defined to allow passing
multiple blobs as one InferRequest input.

Motivation: there is a special use case in which a number of plain images
(e.g. `NV12Blob`) should be passed as one input for a network whose batch
size is greater than 1.

`class CompoundBlob` is not applicable for such cases because:
1. `NV12Blob` is itself a `CompoundBlob`, which prevents combining multiple
   NV12 images into one `CompoundBlob`.
2. The default behavior of most plugins is to not accept a generic
   `CompoundBlob` as a `SetBlob()` argument.

Adding `SetBlob(name, vector...)` to `class IInferRequest`,
`class InferRequest`, `class IInferRequestInternal`, ... is not an effective
solution because the use cases for batched inputs are limited and specific.

+ Apply the rule of zero to CompoundBlob and the classes inherited from it.

* Add "BATCHED_BLOB" optimization capability metric

* Add BatchedBlob usage to hello_nv12_input_classification

* Apply offline code review outcome:
1. Revert CompoundBlob public constructor signatures.
2. Remove the 'workaround' constructor of `BatchedBlob`.
3. Revert tensor descriptors of `I420Blob` and `NV12Blob` back to the
   'fake' value.

* Code review fix

* Add functional tests for CPU, GPU, MULTI, HETERO

* Update doc comment

* Apply code review change requests.
---
 inference-engine/include/ie_compound_blob.h   | 136 ++++-------
 inference-engine/include/ie_plugin_config.hpp |   2 +
 .../hello_nv12_input_classification/main.cpp  | 226 ++++++++++++++----
 .../src/inference_engine/ie_compound_blob.cpp | 146 +++++++----
 .../behavior/set_blob_of_kind.cpp             |  39 +++
 .../behavior/set_blob_of_kind.cpp             |  23 ++
 .../include/behavior/set_blob_of_kind.hpp     |  32 +++
 .../shared/src/behavior/set_blob_of_kind.cpp  | 106 ++++++++
 .../functional_test_utils/blob_utils.hpp      |  43 ++++
 9 files changed, 572 insertions(+), 181 deletions(-)
 create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
 create mode 100644 inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/behavior/set_blob_of_kind.hpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/src/behavior/set_blob_of_kind.cpp

diff --git a/inference-engine/include/ie_compound_blob.h b/inference-engine/include/ie_compound_blob.h
index 5ccf8c7c70f..ff5d71e4078 100644
--- a/inference-engine/include/ie_compound_blob.h
+++ b/inference-engine/include/ie_compound_blob.h
@@ -34,31 +34,6 @@ public:
      */
    using CPtr = std::shared_ptr<const CompoundBlob>;
 
-    /**
-     * @brief A virtual destructor
-     */
-    virtual ~CompoundBlob() = default;
-
-    /**
-     * @brief A copy constructor
-     */
-    CompoundBlob(const CompoundBlob& blob);
-
-    /**
-     * @brief A copy assignment operator
-     */
-    CompoundBlob& operator=(const CompoundBlob& blob) = default;
-
-    /**
-     * @brief A move constructor
-     */
-    CompoundBlob(CompoundBlob&& blob);
-
-    /**
-     * @brief A move assignment operator
-     */
-    CompoundBlob& operator=(CompoundBlob&& blob) = default;
-
     /**
      * @brief Constructs a compound blob from a vector of blobs
      *
@@ -121,9 +96,11 @@ public:
 
 protected:
     /**
-     * @brief A default constructor
+     * @brief Constructs a compound blob with the specified descriptor
+     *
+     * @param tensorDesc A tensor descriptor for the compound blob
      */
-    CompoundBlob();
+    explicit CompoundBlob(const TensorDesc& tensorDesc);
 
     /**
      * @brief Compound blob container for underlying blobs
@@ -156,11 +133,6 @@ public:
      */
     using CPtr = std::shared_ptr<const NV12Blob>;
 
-    /**
-     * @brief A deleted default constructor
-     */
-    NV12Blob() = delete;
-
     /**
      * @brief Constructs NV12 blob from two planes Y and UV
      *
@@ -177,31 +149,6 @@ public:
      */
     NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv);
 
-    /**
-     * @brief A virtual destructor
-     */
-    virtual ~NV12Blob() = default;
-
-    /**
-     * @brief A copy constructor
-     */
-    NV12Blob(const NV12Blob& blob) = default;
-
-    /**
-     * @brief A copy assignment operator
-     */
-    NV12Blob& operator=(const NV12Blob& blob) = default;
-
-    /**
-     * @brief A move constructor
-     */
-    NV12Blob(NV12Blob&& blob) = default;
-
-    /**
-     * @brief A move assignment operator
-     */
-    NV12Blob& operator=(NV12Blob&& blob) = default;
-
     /**
      * @brief Returns a shared pointer to Y plane
      */
@@ -240,11 +187,6 @@ public:
      */
     using CPtr = std::shared_ptr<const I420Blob>;
 
-    /**
-     * @brief A deleted default constructor
-     */
-    I420Blob() = delete;
-
     /**
      * @brief Constructs I420 blob from three planes Y, U and V
      * @param y Blob object that represents Y plane in I420 color format
@@ -261,32 +203,6 @@ public:
      */
     I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v);
 
-    /**
-     * @brief A virtual destructor. It is made out of line for RTTI to
-     *        work correctly on some platforms.
-     */
-    virtual ~I420Blob();
-
-    /**
-     * @brief A copy constructor
-     */
-    I420Blob(const I420Blob& blob) = default;
-
-    /**
-     * @brief A copy assignment operator
-     */
-    I420Blob& operator=(const I420Blob& blob) = default;
-
-    /**
-     * @brief A move constructor
-     */
-    I420Blob(I420Blob&& blob) = default;
-
-    /**
-     * @brief A move assignment operator
-     */
-    I420Blob& operator=(I420Blob&& blob) = default;
-
     /**
      * @brief Returns a reference to shared pointer to Y plane
      *
@@ -349,4 +265,48 @@ public:
     Blob::Ptr createROI(const ROI& roi) const override;
 };
 
+/**
+ * @brief This class represents a blob that contains other blobs - one per batch
+ * @details A plugin that supports BatchedBlob inputs should report BATCHED_BLOB
+ * in the OPTIMIZATION_CAPABILITIES metric.
+ */
+class INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob {
+public:
+    /**
+     * @brief A smart pointer to the BatchedBlob object
+     */
+    using Ptr = std::shared_ptr<BatchedBlob>;
+
+    /**
+     * @brief A smart pointer to the const BatchedBlob object
+     */
+    using CPtr = std::shared_ptr<const BatchedBlob>;
+
+    /**
+     * @brief Constructs a batched blob from a vector of blobs
+     * @details All passed blobs should meet the following requirements:
+     * - all blobs have equal tensor descriptors,
+     * - blob layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW,
+     * - batch dimensions should be equal to 1 or not defined (C, CHW).
+     * The resulting blob's tensor descriptor is constructed from the tensor
+     * descriptors of the passed blobs by setting the batch dimension to blobs.size().
+     *
+     * @param blobs A vector of blobs that is copied to this object
+     */
+    explicit BatchedBlob(const std::vector<Blob::Ptr>& blobs);
+
+    /**
+     * @brief Constructs a batched blob from a vector of blobs
+     * @details All passed blobs should meet the following requirements:
+     * - all blobs have equal tensor descriptors,
+     * - blob layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW,
+     * - batch dimensions should be equal to 1 or not defined (C, CHW).
+     * The resulting blob's tensor descriptor is constructed from the tensor
+     * descriptors of the passed blobs by setting the batch dimension to blobs.size().
+     *
+     * @param blobs A vector of blobs that is moved to this object
+     */
+    explicit BatchedBlob(std::vector<Blob::Ptr>&& blobs);
+};
 }  // namespace InferenceEngine
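For illustration, a minimal sketch of how the declarations above compose with the existing NV12 helpers. This is not part of the patch; `width`, `height`, `nv12_buffers` (a container of raw NV12 frame pointers), `infer_request`, and `input_name` are assumed to exist in the caller:

    using namespace InferenceEngine;

    // Each sub-blob describes a single image, so its batch dimension stays 1.
    TensorDesc y_desc(Precision::U8, {1, 1, height, width}, Layout::NHWC);
    TensorDesc uv_desc(Precision::U8, {1, 2, height / 2, width / 2}, Layout::NHWC);

    std::vector<Blob::Ptr> images;
    for (uint8_t* buf : nv12_buffers) {
        Blob::Ptr y = make_shared_blob<uint8_t>(y_desc, buf);
        Blob::Ptr uv = make_shared_blob<uint8_t>(uv_desc, buf + width * height);
        images.push_back(make_shared_blob<NV12Blob>(y, uv));
    }

    // The resulting descriptor gets batch = images.size(), per the rules documented above.
    infer_request.SetBlob(input_name, make_shared_blob<BatchedBlob>(images));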
diff --git a/inference-engine/include/ie_plugin_config.hpp b/inference-engine/include/ie_plugin_config.hpp
index f234b2780c1..a59320420fc 100644
--- a/inference-engine/include/ie_plugin_config.hpp
+++ b/inference-engine/include/ie_plugin_config.hpp
@@ -95,6 +95,7 @@ DECLARE_METRIC_KEY(FULL_DEVICE_NAME, std::string);
  * - "INT8" - device can support models with INT8 layers
  * - "BIN" - device can support models with BIN layers
  * - "WINOGRAD" - device can support models where convolution is implemented via Winograd transformations
+ * - "BATCHED_BLOB" - device can support BatchedBlob
  */
 DECLARE_METRIC_KEY(OPTIMIZATION_CAPABILITIES, std::vector<std::string>);
 
@@ -104,6 +105,7 @@ DECLARE_METRIC_VALUE(FP16);
 DECLARE_METRIC_VALUE(INT8);
 DECLARE_METRIC_VALUE(BIN);
 DECLARE_METRIC_VALUE(WINOGRAD);
+DECLARE_METRIC_VALUE(BATCHED_BLOB);
 
 /**
  * @brief Metric to provide information about a range for streams on platforms where streams are supported.
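A sketch of how an application can probe this capability before choosing a batching strategy (the device name "CPU" is only an example; the sample below wraps the same check in an isBatchedBlobSupported() helper):

    InferenceEngine::Core ie;
    // SUPPORTED_METRICS tells whether the device reports optimization capabilities at all.
    std::vector<std::string> metrics = ie.GetMetric("CPU", METRIC_KEY(SUPPORTED_METRICS));
    bool batched_blob_supported = false;
    if (std::find(metrics.begin(), metrics.end(), METRIC_KEY(OPTIMIZATION_CAPABILITIES)) != metrics.end()) {
        std::vector<std::string> caps = ie.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES));
        batched_blob_supported = std::find(caps.begin(), caps.end(), METRIC_VALUE(BATCHED_BLOB)) != caps.end();
    }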
diff --git a/inference-engine/samples/hello_nv12_input_classification/main.cpp b/inference-engine/samples/hello_nv12_input_classification/main.cpp
index 9eafbafe1cc..dd409fc4fe2 100644
--- a/inference-engine/samples/hello_nv12_input_classification/main.cpp
+++ b/inference-engine/samples/hello_nv12_input_classification/main.cpp
@@ -14,6 +14,15 @@
 #include <samples/common.hpp>
 #include <samples/classification_results.h>
+#include <samples/slog.hpp>
+
+#include <sys/stat.h>
+#ifdef _WIN32
+#include <os/windows/w_dirent.h>
+#else
+#include <dirent.h>
+#endif
+
 using namespace InferenceEngine;
 
@@ -49,29 +58,80 @@ std::pair<size_t, size_t> parseImageSize(const std::string& size_string) {
     return {width, height};
 }
 
+// Compared to samples/args_helper.hpp, this version filters files by the ".yuv" extension
+/**
+* @brief This function checks input args and the existence of specified files in a given folder
+* @param path A path to a file or a directory to be checked for existence
+* @return A vector of verified input file paths
+*/
+std::vector<std::string> readInputFileNames(const std::string& path) {
+    struct stat sb;
+    if (stat(path.c_str(), &sb) != 0) {
+        slog::warn << "File " << path << " cannot be opened!" << slog::endl;
+        return {};
+    }
+
+    std::vector<std::string> files;
+
+    if (S_ISDIR(sb.st_mode)) {
+        DIR *dp = opendir(path.c_str());
+        if (dp == nullptr) {
+            slog::warn << "Directory " << path << " cannot be opened!" << slog::endl;
+            return {};
+        }
+
+        for (struct dirent* ep = readdir(dp); ep != nullptr; ep = readdir(dp)) {
+            std::string fileName = ep->d_name;
+            if (fileName == "." || fileName == ".." ||
+                fileName.size() < 4 || fileName.substr(fileName.size() - 4) != ".yuv") continue;
+            files.push_back(path + "/" + ep->d_name);
+        }
+        closedir(dp);
+    } else {
+        files.push_back(path);
+    }
+
+    if (files.size() < 20) {
+        slog::info << "Files were added: " << files.size() << slog::endl;
+        for (const std::string& filePath : files) {
+            slog::info << "    " << filePath << slog::endl;
+        }
+    } else {
+        slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
+    }
+
+    return files;
+}
+
+using UString = std::basic_string<unsigned char>;
+
 /**
- * \brief Read image data from file
- * @return buffer containing the image data
+ * \brief Read image data from files
+ * @return buffers containing the images data
  */
-std::unique_ptr<unsigned char[]> readImageDataFromFile(const std::string& image_path, size_t size) {
-    std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
-    if (!file.good() || !file.is_open()) {
-        std::stringstream err;
-        err << "Cannot access input image file. File path: " << image_path;
-        throw std::runtime_error(err.str());
-    }
+std::vector<UString> readImagesDataFromFiles(const std::vector<std::string>& files, size_t size) {
+    std::vector<UString> result;
 
-    const size_t file_size = file.tellg();
-    if (file_size < size) {
-        std::stringstream err;
-        err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
-        throw std::runtime_error(err.str());
-    }
-    file.seekg(0);
+    for (const auto& image_path : files) {
+        std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
+        if (!file.good() || !file.is_open()) {
+            std::stringstream err;
+            err << "Cannot access input image file. File path: " << image_path;
+            throw std::runtime_error(err.str());
+        }
 
-    std::unique_ptr<unsigned char[]> data(new unsigned char[size]);
-    file.read(reinterpret_cast<char*>(data.get()), size);
-    return data;
+        const size_t file_size = file.tellg();
+        if (file_size < size) {
+            std::stringstream err;
+            err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
+            throw std::runtime_error(err.str());
+        }
+        file.seekg(0);
+
+        UString data(size, 0);
+        file.read(reinterpret_cast<char*>(&data[0]), size);
+        result.push_back(std::move(data));
+    }
+    return result;
 }
 
 /**
@@ -89,6 +149,49 @@ void setBatchSize(CNNNetwork& network, size_t batch) {
     network.reshape(inputShapes);
 }
 
+std::vector<Blob::Ptr> readInputBlobs(std::vector<UString>& data, size_t width, size_t height) {
+    // Images are read with the size converted to the NV12 data size: height(NV12) = 3 / 2 * logical height
+
+    // Create tensor descriptors for Y and UV blobs
+    const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8, {1, 1, height, width},
+                                                   InferenceEngine::Layout::NHWC);
+    const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8, {1, 2, height / 2, width / 2},
+                                                    InferenceEngine::Layout::NHWC);
+    const size_t offset = width * height;
+
+    std::vector<Blob::Ptr> blobs;
+    for (auto& buf : data) {
+        // --------------------------- Create a blob to hold the NV12 input data -------------------------------
+        auto ptr = &buf[0];
+
+        // Create blob for Y plane from raw data
+        Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, ptr);
+        // Create blob for UV plane from raw data
+        Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, ptr + offset);
+        // Create NV12Blob from Y and UV blobs
+        blobs.emplace_back(make_shared_blob<NV12Blob>(y_blob, uv_blob));
+    }
+
+    return blobs;
+}
+
+bool isBatchedBlobSupported(const Core& ie, const std::string& device_name) {
+    const std::vector<std::string> supported_metrics =
+        ie.GetMetric(device_name, METRIC_KEY(SUPPORTED_METRICS));
+
+    if (std::find(supported_metrics.begin(), supported_metrics.end(),
+                  METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) {
+        return false;
+    }
+
+    const std::vector<std::string> optimization_caps =
+        ie.GetMetric(device_name, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+
+    return std::find(optimization_caps.begin(), optimization_caps.end(),
+                     METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end();
+}
+
 /**
  * @brief The entry point of the Inference Engine sample application
  */
@@ -96,7 +199,7 @@ int main(int argc, char *argv[]) {
     try {
         // ------------------------------ Parsing and validating input arguments ------------------------------
         if (argc != 5) {
-            std::cout << "Usage : ./hello_nv12_input_classification <path_to_model> <path_to_image> <image_size> <device_name>"
+            std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image(s)> <image_size> <device_name>"
                       << std::endl;
             return EXIT_FAILURE;
         }
@@ -108,13 +211,26 @@ int main(int argc, char *argv[]) {
         const std::string device_name{argv[4]};
         // -----------------------------------------------------------------------------------------------------
 
+        // --------------------------- 0. Read image names -----------------------------------------------------
+        auto image_names = readInputFileNames(input_image_path);
+
+        if (image_names.empty()) {
+            throw std::invalid_argument("images not found");
+        }
+        // -----------------------------------------------------------------------------------------------------
+
         // --------------------------- 1. Load inference engine ------------------------------------------------
         Core ie;
         // -----------------------------------------------------------------------------------------------------
 
         // 2. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
         CNNNetwork network = ie.ReadNetwork(input_model);
-        setBatchSize(network, 1);
+        // -----------------------------------------------------------------------------------------------------
+
+        // --------------------------- 2. Set model batch size -------------------------------------------------
+        size_t batch_size = isBatchedBlobSupported(ie, device_name) ? image_names.size() : 1;
+        std::cout << "Setting network batch size to " << batch_size << std::endl;
+        setBatchSize(network, batch_size);
         // -----------------------------------------------------------------------------------------------------
 
         // --------------------------- 3. Configure input and output -------------------------------------------
@@ -154,40 +270,54 @@ int main(int argc, char *argv[]) {
         // -----------------------------------------------------------------------------------------------------
 
         // --------------------------- 6. Prepare input --------------------------------------------------------
-        // read image with size converted to NV12 data size: height(NV12) = 3 / 2 * logical height
-        auto image_buf = readImageDataFromFile(input_image_path, input_width * (input_height * 3 / 2));
+        auto image_bufs = readImagesDataFromFiles(image_names, input_width * (input_height * 3 / 2));
 
-        // --------------------------- Create a blob to hold the NV12 input data -------------------------------
-        // Create tensor descriptors for Y and UV blobs
-        InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8,
-            {1, 1, input_height, input_width}, InferenceEngine::Layout::NHWC);
-        InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8,
-            {1, 2, input_height / 2, input_width / 2}, InferenceEngine::Layout::NHWC);
-        const size_t offset = input_width * input_height;
+        auto inputs = readInputBlobs(image_bufs, input_width, input_height);
 
-        // Create blob for Y plane from raw data
-        Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, image_buf.get());
-        // Create blob for UV plane from raw data
-        Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, image_buf.get() + offset);
-        // Create NV12Blob from Y and UV blobs
-        Blob::Ptr input = make_shared_blob<NV12Blob>(y_blob, uv_blob);
+        // If batch_size > 1 => batched blob is supported => replace all inputs by a single BatchedBlob
+        if (batch_size > 1) {
+            assert(batch_size == inputs.size());
+            std::cout << "Infer using BatchedBlob of NV12 images." << std::endl;
+            Blob::Ptr batched_input = make_shared_blob<BatchedBlob>(inputs);
+            inputs = {batched_input};
+        }
 
-        // --------------------------- Set the input blob to the InferRequest ----------------------------------
-        infer_request.SetBlob(input_name, input);
-        // -----------------------------------------------------------------------------------------------------
+        /** Read labels from a file (e.g. AlexNet.labels) **/
+        std::string labelFileName = fileNameNoExt(input_model) + ".labels";
+        std::vector<std::string> labels;
 
-        // --------------------------- 7. Do inference ---------------------------------------------------------
-        /* Running the request synchronously */
-        infer_request.Infer();
-        // -----------------------------------------------------------------------------------------------------
+        std::ifstream inputFile;
+        inputFile.open(labelFileName, std::ios::in);
+        if (inputFile.is_open()) {
+            std::string strLine;
+            while (std::getline(inputFile, strLine)) {
+                trim(strLine);
+                labels.push_back(strLine);
+            }
+        }
 
-        // --------------------------- 8. Process output -------------------------------------------------------
-        Blob::Ptr output = infer_request.GetBlob(output_name);
+        for (size_t i = 0; i < inputs.size(); i++) {
+            const auto& input = inputs[i];
+            // --------------------------- Set the input blob to the InferRequest ------------------------------
+            infer_request.SetBlob(input_name, input);
+            // --------------------------------------------------------------------------------------------------
 
-        // Print classification results
-        ClassificationResult classificationResult(output, {input_image_path});
-        classificationResult.print();
-        // -----------------------------------------------------------------------------------------------------
+            // --------------------------- 7. Do inference -----------------------------------------------------
+            /* Running the request synchronously */
+            infer_request.Infer();
+            // --------------------------------------------------------------------------------------------------
+
+            // --------------------------- 8. Process output ---------------------------------------------------
+            Blob::Ptr output = infer_request.GetBlob(output_name);
+
+            // Print classification results
+            const auto names_offset = image_names.begin() + batch_size * i;
+            std::vector<std::string> names(names_offset, names_offset + batch_size);
+
+            ClassificationResult classificationResult(output, names, batch_size, 10, labels);
+            classificationResult.print();
+            // --------------------------------------------------------------------------------------------------
+        }
     } catch (const std::exception & ex) {
         std::cerr << ex.what() << std::endl;
         return EXIT_FAILURE;
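As a sanity check on the 3/2 size factor used by the sample above, a self-contained sketch with an arbitrary 640x480 frame:

    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t width = 640, height = 480;
        const std::size_t y_size = width * height;       // 307200 bytes: full-resolution luma plane
        const std::size_t uv_size = width * height / 2;  // 153600 bytes: interleaved half-resolution chroma
        // height(NV12) = 3/2 * logical height, so the per-file read size is:
        assert(width * (height * 3 / 2) == y_size + uv_size);  // 460800 bytes
        return 0;
    }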
diff --git a/inference-engine/src/inference_engine/ie_compound_blob.cpp b/inference-engine/src/inference_engine/ie_compound_blob.cpp
index 5d35f531acf..15e94a952de 100644
--- a/inference-engine/src/inference_engine/ie_compound_blob.cpp
+++ b/inference-engine/src/inference_engine/ie_compound_blob.cpp
@@ -18,7 +18,7 @@ namespace InferenceEngine {
 namespace {
 
-void verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
+TensorDesc verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
     // Y and UV must be valid pointers
     if (y == nullptr || uv == nullptr) {
         THROW_IE_EXCEPTION << "Y and UV planes must be valid Blob objects";
@@ -91,9 +91,11 @@ void verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
         THROW_IE_EXCEPTION << "The width of the Y plane must be equal to (2 * the width of the UV plane), actual: "
                            << yDims[3] << " (Y plane) and " << uvDims[3] << " (UV plane)";
     }
+
+    return {Precision::U8, {}, Layout::NCHW};
 }
 
-void verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
+TensorDesc verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
     // Y, U and V must be valid pointers
     if (y == nullptr || u == nullptr || v == nullptr) {
         THROW_IE_EXCEPTION << "Y, U and V planes must be valid Blob objects";
@@ -190,21 +192,85 @@ void verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr
         THROW_IE_EXCEPTION << "The width of the Y plane must be equal to (2 * the width of the V plane), actual: "
                            << yDims[3] << " (Y plane) and " << vDims[3] << " (V plane)";
     }
+
+    return {Precision::U8, {}, Layout::NCHW};
+}
+
+TensorDesc getBlobTensorDesc(const Blob::Ptr& blob) {
+    if (auto nv12 = dynamic_cast<NV12Blob*>(blob.get())) {
+        auto yDesc = nv12->y()->getTensorDesc();
+        yDesc.getDims()[1] += 2;
+        return yDesc;
+    }
+
+    if (auto i420 = dynamic_cast<I420Blob*>(blob.get())) {
+        auto yDesc = i420->y()->getTensorDesc();
+        yDesc.getDims()[1] += 2;
+        return yDesc;
+    }
+
+    return blob->getTensorDesc();
+}
+
+TensorDesc verifyBatchedBlobInput(const std::vector<Blob::Ptr>& blobs) {
+    // verify invariants
+    if (blobs.empty()) {
+        THROW_IE_EXCEPTION << "BatchedBlob cannot be created from an empty vector of Blob objects. "
+                              "Please make sure the vector contains at least one Blob";
+    }
+
+    // Cannot create a compound blob from nullptr Blob objects
+    if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
+            return blob == nullptr;
+        })) {
+        THROW_IE_EXCEPTION << "Cannot create a compound blob from nullptr Blob objects";
+    }
+
+    const auto subBlobDesc = getBlobTensorDesc(blobs[0]);
+
+    if (std::any_of(blobs.begin(), blobs.end(),
+                    [&subBlobDesc](const Blob::Ptr& blob) {
+                        return getBlobTensorDesc(blob) != subBlobDesc;
+                    })) {
+        THROW_IE_EXCEPTION << "All blobs must have equal tensor descriptors";
+    }
+
+    auto subBlobLayout = subBlobDesc.getLayout();
+
+    auto blobLayout = Layout::ANY;
+    SizeVector blobDims = subBlobDesc.getDims();
+    switch (subBlobLayout) {
+    case NCHW:
+    case NHWC:
+    case NCDHW:
+    case NDHWC:
+    case NC:
+    case CN:
+        blobLayout = subBlobLayout;
+        if (blobDims[0] != 1) {
+            THROW_IE_EXCEPTION << "All sub-blobs must have batch size equal to 1";
+        }
+        blobDims[0] = blobs.size();
+        break;
+    case C:
+        blobLayout = NC;
+        blobDims.insert(blobDims.begin(), blobs.size());
+        break;
+    case CHW:
+        blobLayout = NCHW;
+        blobDims.insert(blobDims.begin(), blobs.size());
+        break;
+    default:
+        THROW_IE_EXCEPTION << "Unsupported sub-blob layout - it must be one of: [NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW]";
+    }
+
+    return TensorDesc{subBlobDesc.getPrecision(), blobDims, blobLayout};
+}
 
 }  // anonymous namespace
 
-CompoundBlob::CompoundBlob(): Blob(TensorDesc(Precision::UNSPECIFIED, {}, Layout::ANY)) {}
+CompoundBlob::CompoundBlob(const TensorDesc& tensorDesc): Blob(tensorDesc) {}
 
-CompoundBlob::CompoundBlob(const CompoundBlob& blob): CompoundBlob() {
-    this->_blobs = blob._blobs;
-}
-
-CompoundBlob::CompoundBlob(CompoundBlob&& blob): CompoundBlob() {
-    this->_blobs = std::move(blob._blobs);
-}
-
-CompoundBlob::CompoundBlob(const std::vector<Blob::Ptr>& blobs): CompoundBlob() {
+CompoundBlob::CompoundBlob(const std::vector<Blob::Ptr>& blobs): CompoundBlob(TensorDesc{}) {
     // Cannot create a compound blob from nullptr Blob objects
     if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
         return blob == nullptr;
@@ -223,7 +289,7 @@ CompoundBlob::CompoundBlob(const std::vector<Blob::Ptr>& blobs): CompoundBlob()
     this->_blobs = blobs;
 }
 
-CompoundBlob::CompoundBlob(std::vector<Blob::Ptr>&& blobs): CompoundBlob() {
+CompoundBlob::CompoundBlob(std::vector<Blob::Ptr>&& blobs): CompoundBlob(TensorDesc{}) {
     // Cannot create a compound blob from nullptr Blob objects
     if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
         return blob == nullptr;
@@ -295,22 +361,14 @@ void* CompoundBlob::getHandle() const noexcept {
     return nullptr;
 }
 
-NV12Blob::NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv) {
-    // verify data is correct
-    verifyNV12BlobInput(y, uv);
-    // set blobs
-    _blobs.emplace_back(y);
-    _blobs.emplace_back(uv);
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+NV12Blob::NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv)
+    : CompoundBlob(verifyNV12BlobInput(y, uv)) {
+    this->_blobs = {y, uv};
 }
 
-NV12Blob::NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv) {
-    // verify data is correct
-    verifyNV12BlobInput(y, uv);
-    // set blobs
-    _blobs.emplace_back(std::move(y));
-    _blobs.emplace_back(std::move(uv));
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+NV12Blob::NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv)
+    : CompoundBlob(verifyNV12BlobInput(y, uv)) {
+    this->_blobs = {std::move(y), std::move(uv)};
 }
 
 Blob::Ptr& NV12Blob::y() noexcept {
@@ -346,28 +404,16 @@ Blob::Ptr NV12Blob::createROI(const ROI& roi) const {
     return std::make_shared<NV12Blob>(yRoiBlob, uvRoiBlob);
 }
 
-I420Blob::I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
-    // verify data is correct
-    verifyI420BlobInput(y, u, v);
-    // set blobs
-    _blobs.emplace_back(y);
-    _blobs.emplace_back(u);
-    _blobs.emplace_back(v);
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+I420Blob::I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v)
+    : CompoundBlob(verifyI420BlobInput(y, u, v)) {
+    this->_blobs = {y, u, v};
 }
 
-I420Blob::I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v) {
-    // verify data is correct
-    verifyI420BlobInput(y, u, v);
-    // set blobs
-    _blobs.emplace_back(std::move(y));
-    _blobs.emplace_back(std::move(u));
-    _blobs.emplace_back(std::move(v));
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+I420Blob::I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v)
+    : CompoundBlob(verifyI420BlobInput(y, u, v)) {
+    this->_blobs = {std::move(y), std::move(u), std::move(v)};
 }
 
-I420Blob::~I420Blob() {}
-
 Blob::Ptr& I420Blob::y() noexcept {
     // NOTE: Y plane is a memory blob, which is checked in the constructor
     return _blobs[0];
@@ -412,4 +458,14 @@ Blob::Ptr I420Blob::createROI(const ROI& roi) const {
     return std::make_shared<I420Blob>(yRoiBlob, uRoiBlob, vRoiBlob);
 }
 
+BatchedBlob::BatchedBlob(const std::vector<Blob::Ptr>& blobs)
+    : CompoundBlob(verifyBatchedBlobInput(blobs)) {
+    this->_blobs = blobs;
+}
+
+BatchedBlob::BatchedBlob(std::vector<Blob::Ptr>&& blobs)
+    : CompoundBlob(verifyBatchedBlobInput(blobs)) {
+    this->_blobs = std::move(blobs);
+}
+
 }  // namespace InferenceEngine
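A small sketch of what the layout promotion in verifyBatchedBlobInput() produces, assuming the usual Inference Engine headers are included (the FP32 3x32x32 shape is arbitrary): four CHW sub-blobs come out as one NCHW descriptor with N = 4:

    using namespace InferenceEngine;

    TensorDesc sub_desc(Precision::FP32, {3, 32, 32}, Layout::CHW);  // no batch dimension
    std::vector<Blob::Ptr> subs;
    for (int i = 0; i < 4; ++i) {
        auto blob = make_shared_blob<float>(sub_desc);
        blob->allocate();
        subs.push_back(blob);
    }

    BatchedBlob batch(subs);
    // CHW is promoted to NCHW and the new leading dimension is blobs.size().
    assert(batch.getTensorDesc().getLayout() == Layout::NCHW);
    assert(batch.getTensorDesc().getDims() == (SizeVector{4, 3, 32, 32}));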
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_blob_of_kind.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
new file mode 100644
index 00000000000..b50f301e30f
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+#include "common_test_utils/test_constants.hpp"
+#include "multi-device/multi_device_config.hpp"
+
+using namespace BehaviorTestsDefinitions;
+using namespace InferenceEngine;
+
+const std::vector<FuncTestUtils::BlobKind> blobKinds = {
+    FuncTestUtils::BlobKind::Simple,
+    FuncTestUtils::BlobKind::Compound,
+    FuncTestUtils::BlobKind::BatchOfSimple
+};
+
+const SetBlobOfKindConfig cpuConfig{}; // nothing special
+const SetBlobOfKindConfig multiConfig{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES), CommonTestUtils::DEVICE_CPU }};
+const SetBlobOfKindConfig heteroConfig{{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_CPU }};
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindCPU, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                           ::testing::Values(cpuConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindMULTI, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_MULTI),
+                                           ::testing::Values(multiConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindHETERO, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_HETERO),
+                                           ::testing::Values(heteroConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_blob_of_kind.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
new file mode 100644
index 00000000000..e638e5e01df
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/set_blob_of_kind.cpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace BehaviorTestsDefinitions;
+using namespace InferenceEngine;
+
+const std::vector<FuncTestUtils::BlobKind> blobKinds = {
+    FuncTestUtils::BlobKind::Simple,
+    FuncTestUtils::BlobKind::Compound,
+    FuncTestUtils::BlobKind::BatchOfSimple
+};
+
+const SetBlobOfKindConfig gpuConfig{}; // nothing special
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindGPU, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                           ::testing::Values(gpuConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/set_blob_of_kind.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/set_blob_of_kind.hpp
new file mode 100644
index 00000000000..4f305a74b99
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/behavior/set_blob_of_kind.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "common_test_utils/common_utils.hpp"
+
+namespace BehaviorTestsDefinitions {
+
+using SetBlobOfKindConfig = std::remove_reference<decltype(((LayerTestsUtils::LayerTestsCommon*)nullptr)->GetConfiguration())>::type;
+
+using SetBlobOfKindParams = std::tuple<FuncTestUtils::BlobKind,       // blob kind
+                                       LayerTestsUtils::TargetDevice, // device name
+                                       SetBlobOfKindConfig>;          // configuration
+
+class SetBlobOfKindTest : public testing::WithParamInterface<SetBlobOfKindParams>,
+                          virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
+    void Run() override;
+    static std::string getTestCaseName(testing::TestParamInfo<SetBlobOfKindParams> obj);
+    void ExpectSetBlobThrow();
+
+protected:
+    void SetUp() override;
+
+private:
+    FuncTestUtils::BlobKind blobKind;
+};
+
+} // namespace BehaviorTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/behavior/set_blob_of_kind.cpp b/inference-engine/tests/functional/plugin/shared/src/behavior/set_blob_of_kind.cpp
new file mode 100644
index 00000000000..ae71b88d57d
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/behavior/set_blob_of_kind.cpp
@@ -0,0 +1,106 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+
+#include <functional_test_utils/blob_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+
+#include <ie_compound_blob.h>
+
+using namespace InferenceEngine;
+
+namespace BehaviorTestsDefinitions {
+
+std::string SetBlobOfKindTest::getTestCaseName(testing::TestParamInfo<SetBlobOfKindParams> obj) {
+    FuncTestUtils::BlobKind blobKind;
+    std::string targetDevice;
+    std::map<std::string, std::string> configuration;
+    std::tie(blobKind, targetDevice, configuration) = obj.param;
+
+    std::ostringstream result;
+    result << "Kind=" << blobKind;
+    result << " Device=" << targetDevice;
+    return result.str();
+}
+
+namespace {
+
+bool isBatchedBlobSupported(const std::shared_ptr<InferenceEngine::Core>& core, const LayerTestsUtils::TargetDevice& targetDevice) {
+    const std::vector<std::string> supported_metrics = core->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_METRICS));
+
+    if (std::find(supported_metrics.begin(), supported_metrics.end(),
+                  METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) {
+        return false;
+    }
+
+    const std::vector<std::string> optimization_caps =
+        core->GetMetric(targetDevice, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+
+    return std::find(optimization_caps.begin(), optimization_caps.end(),
+                     METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end();
+}
+
+bool isBlobKindSupported(const std::shared_ptr<InferenceEngine::Core>& core,
+                         const LayerTestsUtils::TargetDevice& targetDevice,
+                         FuncTestUtils::BlobKind blobKind) {
+    switch (blobKind) {
+    case FuncTestUtils::BlobKind::Simple:
+        return true;
+    case FuncTestUtils::BlobKind::Compound:
+        return false;
+    case FuncTestUtils::BlobKind::BatchOfSimple:
+        return isBatchedBlobSupported(core, targetDevice);
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
+} // namespace
+
+Blob::Ptr SetBlobOfKindTest::GenerateInput(const InferenceEngine::InputInfo& info) const {
+    return makeBlobOfKind(info.getTensorDesc(), blobKind);
+}
+
+void SetBlobOfKindTest::Run() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    LoadNetwork();
+
+    if (isBlobKindSupported(core, targetDevice, blobKind)) {
+        Infer();
+    } else {
+        ExpectSetBlobThrow();
+    }
+}
+
+void SetBlobOfKindTest::ExpectSetBlobThrow() {
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    for (const auto &input : executableNetwork.GetInputsInfo()) {
+        const auto &info = input.second;
+        auto blob = GenerateInput(*info);
+        EXPECT_THROW(inferRequest.SetBlob(info->name(), blob),
+                     InferenceEngine::details::InferenceEngineException);
+    }
+}
+
+void SetBlobOfKindTest::SetUp() {
+    SizeVector IS{4, 3, 6, 8};
+    std::tie(blobKind, targetDevice, configuration) = this->GetParam();
+
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(Precision::FP32);
+    auto params = ngraph::builder::makeParams(ngPrc, {IS});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+    auto axisNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector<int64_t>{-1})->output(0);
+    auto cumSum = std::dynamic_pointer_cast<ngraph::op::CumSum>(ngraph::builder::makeCumSum(paramOuts[0], axisNode, false, false));
+    ngraph::ResultVector results{std::make_shared<ngraph::op::Result>(cumSum)};
+    function = std::make_shared<ngraph::Function>(results, params, "InferSetBlob");
+}
+
+TEST_P(SetBlobOfKindTest, CompareWithRefs) {
+    Run();
+}
+
+} // namespace BehaviorTestsDefinitions
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
index b24f1266597..3a6be95061b 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
@@ -13,6 +13,7 @@
 #include <vector>
 #include "blob_factory.hpp"
 #include "blob_transform.hpp"
+#include "ie_compound_blob.h"
 #include "precision_utils.h"
 #include "common_test_utils/data_utils.hpp"
 #include "common_test_utils/test_constants.hpp"
@@ -599,4 +600,46 @@ static short reducePrecisionBitwiseS(const float in) {
     return s;
 }
 }  // namespace Bf16TestUtils
+
+enum class BlobKind {
+    Simple,
+    Compound,
+    BatchOfSimple
+};
+
+inline std::ostream& operator<<(std::ostream& os, BlobKind kind) {
+    switch (kind) {
+    case BlobKind::Simple:
+        return os << "Simple";
+    case BlobKind::Compound:
+        return os << "Compound";
+    case BlobKind::BatchOfSimple:
+        return os << "BatchOfSimple";
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
+inline InferenceEngine::Blob::Ptr makeBlobOfKind(const InferenceEngine::TensorDesc& td, BlobKind blobKind) {
+    using namespace ::InferenceEngine;
+    switch (blobKind) {
+    case BlobKind::Simple:
+        return createAndFillBlob(td);
+    case BlobKind::Compound:
+        return make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>{});
+    case BlobKind::BatchOfSimple: {
+        const auto subBlobsNum = td.getDims()[0];
+        auto subBlobDesc = td;
+        subBlobDesc.getDims()[0] = 1;
+        std::vector<Blob::Ptr> subBlobs;
+        for (size_t i = 0; i < subBlobsNum; i++) {
+            subBlobs.push_back(makeBlobOfKind(subBlobDesc, BlobKind::Simple));
+        }
+        return make_shared_blob<BatchedBlob>(subBlobs);
+    }
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
 } // namespace FuncTestUtils
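For reference, a minimal usage sketch of the new helper (the descriptor mirrors the shared test's SetUp() shape and is otherwise arbitrary):

    using namespace InferenceEngine;

    // A batch-4 NCHW descriptor yields a BatchedBlob of four single-image sub-blobs.
    TensorDesc td(Precision::FP32, {4, 3, 6, 8}, Layout::NCHW);
    Blob::Ptr blob = FuncTestUtils::makeBlobOfKind(td, FuncTestUtils::BlobKind::BatchOfSimple);

    // Plugins that do not report BATCHED_BLOB are expected to reject such a blob in
    // SetBlob(), which is what SetBlobOfKindTest::ExpectSetBlobThrow() verifies.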