Add fillBlobs to fix issue with negative values in image_info input layer (#3335)

Author: Vitaliy Urusovskij
Date: 2020-11-25 14:37:21 +03:00
Committed by: GitHub
Parent: 5767a54fa2
Commit: 7bd3738afd
9 changed files with 242 additions and 8 deletions
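
Before this change, the stress and memory tests created an InferRequest and called Infer() on input blobs that were never explicitly filled, so an image_info input (an NC blob with two or more channels, used by some detection topologies) could hold arbitrary, possibly negative, values. The commit adds a shared fillBlobs helper (new ie_utils.h / ie_utils.cpp) that fills image inputs with random data and image_info inputs with the spatial size of the image plus ones, and calls it before every Infer() in the affected pipelines. A minimal sketch of the pattern inserted throughout the commit (model and target_device stand in for the test parameters):

Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();

auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;                     // fall back to batch 1 when unset
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);                // fill all inputs before inference
infer_request.Infer();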

View File

@@ -4,6 +4,7 @@
#include "pipelines.h"
#include "../utils.h"
#include "../ie_utils.h"
#include <iostream>
#include <string>
@@ -113,6 +114,12 @@ std::function<void()> infer_request_inference(const std::string &model, const st
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -120,10 +127,9 @@ std::function<void()> infer_request_inference(const std::string &model, const st
};
}
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork) {
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) {
return [&] {
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
};

View File

@@ -15,5 +15,4 @@ std::function<void()> recreate_exenetwork(InferenceEngine::Core &ie, const std::
std::function<void()> create_infer_request(const std::string &model, const std::string &target_device);
std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork);
std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork);
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info);

View File

@@ -0,0 +1,58 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ie_utils.h"
#include <inference_engine.hpp>
using namespace InferenceEngine;
/**
* @brief Fill InferRequest blobs with random values or image information
*/
void fillBlobs(InferenceEngine::InferRequest inferRequest,
const InferenceEngine::ConstInputsDataMap& inputsInfo,
const size_t& batchSize) {
std::vector<std::pair<size_t, size_t>> input_image_sizes;
for (const ConstInputsDataMap::value_type& item : inputsInfo) {
if (isImage(item.second))
input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
}
for (const ConstInputsDataMap::value_type& item : inputsInfo) {
Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
// Fill image information
auto image_size = input_image_sizes.at(0);
if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
fillBlobImInfo<float>(inputBlob, batchSize, image_size);
} else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
fillBlobImInfo<short>(inputBlob, batchSize, image_size);
} else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
} else {
THROW_IE_EXCEPTION << "Input precision is not supported for image info!";
}
continue;
}
// Fill random
if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
fillBlobRandom<float>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
fillBlobRandom<short>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
fillBlobRandom<int32_t>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
fillBlobRandom<uint8_t>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
fillBlobRandom<int8_t>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
fillBlobRandom<uint16_t>(inputBlob);
} else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
fillBlobRandom<int16_t>(inputBlob);
} else {
THROW_IE_EXCEPTION << "Input precision is not supported for " << item.first;
}
}
}
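
For reference, a rough way to check what fillBlobs writes into an image_info input after a call like the one above; the input name "image_info" and the 224x224 image size are assumptions for illustration, not part of this commit:

// Hypothetical readback; "image_info" is an assumed input name.
Blob::Ptr info = inferRequest.GetBlob("image_info");
MemoryBlob::Ptr minfo = as<MemoryBlob>(info);
auto holder = minfo->rmap();                     // read-only lock while the buffer is accessed
const float* data = holder.as<const float*>();
// With a single 224x224 image input and batch 1, data is {224, 224, 1, ...}:
// indices 0 and 1 hold the spatial size, the remaining channels are set to 1.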

View File

@@ -0,0 +1,114 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
using namespace InferenceEngine;
/**
* @brief Determine if InferenceEngine blob means image or not
*/
template<typename T>
static bool isImage(const T &blob) {
auto descriptor = blob->getTensorDesc();
if (descriptor.getLayout() != InferenceEngine::NCHW) {
return false;
}
auto channels = descriptor.getDims()[1];
return channels == 3;
}
/**
* @brief Determine if InferenceEngine blob means image information or not
*/
template<typename T>
static bool isImageInfo(const T &blob) {
auto descriptor = blob->getTensorDesc();
if (descriptor.getLayout() != InferenceEngine::NC) {
return false;
}
auto channels = descriptor.getDims()[1];
return (channels >= 2);
}
/**
* @brief Return height and width from provided InferenceEngine tensor description
*/
inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
const auto& layout = desc.getLayout();
const auto& dims = desc.getDims();
const auto& size = dims.size();
if ((size >= 2) &&
(layout == InferenceEngine::Layout::NCHW ||
layout == InferenceEngine::Layout::NHWC ||
layout == InferenceEngine::Layout::NCDHW ||
layout == InferenceEngine::Layout::NDHWC ||
layout == InferenceEngine::Layout::OIHW ||
layout == InferenceEngine::Layout::GOIHW ||
layout == InferenceEngine::Layout::OIDHW ||
layout == InferenceEngine::Layout::GOIDHW ||
layout == InferenceEngine::Layout::CHW ||
layout == InferenceEngine::Layout::HW)) {
// Regardless of layout, dimensions are stored in fixed order
return std::make_pair(dims.back(), dims.at(size - 2));
} else {
THROW_IE_EXCEPTION << "Tensor does not have height and width dimensions";
}
}
/**
* @brief Fill InferenceEngine blob with random values
*/
template<typename T>
void fillBlobRandom(Blob::Ptr& inputBlob) {
MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
// locked memory holder should be alive all time while access to its buffer happens
auto minputHolder = minput->wmap();
auto inputBlobData = minputHolder.as<T *>();
for (size_t i = 0; i < inputBlob->size(); i++) {
auto rand_max = RAND_MAX;
inputBlobData[i] = (T) rand() / static_cast<T>(rand_max) * 10;
}
}
/**
* @brief Fill InferenceEngine blob with image information
*/
template<typename T>
void fillBlobImInfo(Blob::Ptr& inputBlob,
const size_t& batchSize,
std::pair<size_t, size_t> image_size) {
MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
// locked memory holder should be alive all time while access to its buffer happens
auto minputHolder = minput->wmap();
auto inputBlobData = minputHolder.as<T *>();
for (size_t b = 0; b < batchSize; b++) {
size_t iminfoSize = inputBlob->size()/batchSize;
for (size_t i = 0; i < iminfoSize; i++) {
size_t index = b*iminfoSize + i;
if (0 == i)
inputBlobData[index] = static_cast<T>(image_size.first);
else if (1 == i)
inputBlobData[index] = static_cast<T>(image_size.second);
else
inputBlobData[index] = 1;
}
}
}
/**
* @brief Fill InferRequest blobs with random values or image information
*/
void fillBlobs(InferenceEngine::InferRequest inferRequest,
const InferenceEngine::ConstInputsDataMap& inputsInfo,
const size_t& batchSize);
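
Most of the helpers above are defined inline in this header, so they can be exercised in isolation. A standalone sketch, not part of the commit, using a square 224x224 input so the height/width order is immaterial; the shapes and precision are illustrative:

// Sketch: classify an image blob and an image_info blob, then fill the latter.
#include "ie_utils.h"
#include <inference_engine.hpp>
using namespace InferenceEngine;

int main() {
    TensorDesc imageDesc(Precision::FP32, {1, 3, 224, 224}, Layout::NCHW);  // image-like input
    TensorDesc infoDesc(Precision::FP32, {1, 3}, Layout::NC);               // image_info-like input

    Blob::Ptr image = make_shared_blob<float>(imageDesc);
    Blob::Ptr info = make_shared_blob<float>(infoDesc);
    image->allocate();
    info->allocate();

    bool isImg = isImage(image);                    // true: NCHW with 3 channels
    bool isInfo = isImageInfo(info);                // true: NC with >= 2 channels
    auto size = getTensorHeightWidth(imageDesc);    // {224, 224}

    fillBlobImInfo<float>(info, 1, size);           // info now holds {224, 224, 1}
    return (isImg && isInfo) ? 0 : 1;
}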

View File

@@ -4,6 +4,7 @@
#include "tests_utils.h"
#include "../common/tests_utils.h"
#include "../common/ie_utils.h"
#include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h"
@@ -75,6 +76,12 @@ TEST_P(MemCheckTestSuite, infer_request_inference) {
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
InferRequest inferRequest = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(inferRequest, inputsInfo, batchSize);
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)

View File

@@ -3,6 +3,7 @@
//
#include "../common/tests_utils.h"
#include "../common/ie_utils.h"
#include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h"
@@ -98,7 +99,14 @@ TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
return test_reinfer_request_inference(infer_request, cnnNetwork, test_params.model, test_params.device, test_params.numiters);
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
return test_reinfer_request_inference(infer_request, output_info, test_params.model, test_params.device, test_params.numiters);
};
test_runner(test_params.numthreads, test);
}

View File

@@ -201,9 +201,9 @@ TestResult test_infer_request_inference(const std::string& model, const std::str
}
TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request,
InferenceEngine::CNNNetwork& cnnNetwork, const std::string& model,
InferenceEngine::OutputsDataMap& output_info, const std::string& model,
const std::string& target_device, const int& n) {
log_info("Inference of InferRequest from network: \"" << model << "\" for device: \"" << target_device << "\" for "
<< n << " times");
return common_test_pipeline(reinfer_request_inference(infer_request, cnnNetwork), n);
return common_test_pipeline(reinfer_request_inference(infer_request, output_info), n);
}

View File

@@ -21,5 +21,5 @@ TestResult test_recreate_exenetwork(InferenceEngine::Core &ie, const std::string
TestResult test_create_infer_request(const std::string &model, const std::string &target_device, const int &n);
TestResult test_recreate_infer_request(InferenceEngine::ExecutableNetwork& network, const std::string &model, const std::string &target_device, const int &n);
TestResult test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n);
TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork, const std::string &model, const std::string &target_device, const int &n);
TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info, const std::string &model, const std::string &target_device, const int &n);
// tests_pipelines/tests_pipelines.cpp

View File

@@ -3,6 +3,7 @@
//
#include "tests_pipelines.h"
#include "../common/ie_utils.h"
#include <string>
@@ -56,6 +57,12 @@ void test_load_unload_plugin_full_pipeline(const std::string &model, const std::
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -82,6 +89,12 @@ void test_read_network_full_pipeline(const std::string &model, const std::string
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -109,6 +122,12 @@ void test_set_input_params_full_pipeline(const std::string &model, const std::st
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -151,6 +170,12 @@ void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, co
}
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -178,6 +203,12 @@ void test_create_exenetwork_full_pipeline(const std::string &model, const std::s
exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
}
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
@@ -199,11 +230,16 @@ void test_create_infer_request_full_pipeline(const std::string &model, const std
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request;
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
infer_request = exeNetwork.CreateInferRequest();
fillBlobs(infer_request, inputsInfo, batchSize);
}
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
@@ -226,6 +262,12 @@ void test_infer_request_inference_full_pipeline(const std::string &model, const
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");