Add fill_inputs step in timetests (#2413)
parent: 1c97b13e9a
commit: 0bd966ad08
tests/time_tests/src/timetests/common.h (new file, 154 additions)
@@ -0,0 +1,154 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

// Headers used by the helpers below (added so the header is self-contained)
#include <inference_engine.hpp>
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

using namespace InferenceEngine;

/**
 * @brief Determine whether an InferenceEngine blob represents an image (NCHW layout with 3 channels)
 */
template<typename T>
static bool isImage(const T &blob) {
    auto descriptor = blob->getTensorDesc();
    if (descriptor.getLayout() != InferenceEngine::NCHW) {
        return false;
    }
    auto channels = descriptor.getDims()[1];
    return channels == 3;
}

/**
 * @brief Determine whether an InferenceEngine blob represents image information (NC layout with at least 2 channels)
 */
template<typename T>
static bool isImageInfo(const T &blob) {
    auto descriptor = blob->getTensorDesc();
    if (descriptor.getLayout() != InferenceEngine::NC) {
        return false;
    }
    auto channels = descriptor.getDims()[1];
    return (channels >= 2);
}
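// Example (illustrative only): a blob described by TensorDesc(Precision::FP32,
// {1, 3, 224, 224}, Layout::NCHW) satisfies isImage(), while one described by
// TensorDesc(Precision::FP32, {1, 3}, Layout::NC) satisfies isImageInfo().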

/**
 * @brief Return the (width, height) pair from a provided InferenceEngine tensor description
 */
inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
    const auto& layout = desc.getLayout();
    const auto& dims = desc.getDims();
    const auto& size = dims.size();
    if ((size >= 2) &&
        (layout == InferenceEngine::Layout::NCHW ||
         layout == InferenceEngine::Layout::NHWC ||
         layout == InferenceEngine::Layout::NCDHW ||
         layout == InferenceEngine::Layout::NDHWC ||
         layout == InferenceEngine::Layout::OIHW ||
         layout == InferenceEngine::Layout::GOIHW ||
         layout == InferenceEngine::Layout::OIDHW ||
         layout == InferenceEngine::Layout::GOIDHW ||
         layout == InferenceEngine::Layout::CHW ||
         layout == InferenceEngine::Layout::HW)) {
        // Regardless of layout, dimensions are stored in fixed (NCHW-like) order,
        // so the last dimension is width and the one before it is height
        return std::make_pair(dims.back(), dims.at(size - 2));
    } else {
        THROW_IE_EXCEPTION << "Tensor does not have height and width dimensions";
    }
}
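// Example (illustrative only): for TensorDesc(Precision::FP32, {1, 3, 240, 320},
// Layout::NCHW), getDims() is {N=1, C=3, H=240, W=320}, so getTensorHeightWidth()
// returns {320, 240}.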

/**
 * @brief Fill InferenceEngine blob with random values
 */
template<typename T>
void fillBlobRandom(Blob::Ptr& inputBlob) {
    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
    // locked memory holder should be alive all time while access to its buffer happens
    auto minputHolder = minput->wmap();

    auto inputBlobData = minputHolder.as<T *>();
    for (size_t i = 0; i < inputBlob->size(); i++) {
        auto rand_max = RAND_MAX;
        inputBlobData[i] = static_cast<T>(rand()) / static_cast<T>(rand_max) * 10;
    }
}
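// Note: for floating-point T this yields values in [0, 10]; integral precisions
// mostly receive 0 because the division truncates, which is acceptable for
// timing-only measurements.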

/**
 * @brief Fill InferenceEngine blob with image information
 */
template<typename T>
void fillBlobImInfo(Blob::Ptr& inputBlob,
                    const size_t& batchSize,
                    std::pair<size_t, size_t> image_size) {
    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
    // locked memory holder should be alive all time while access to its buffer happens
    auto minputHolder = minput->wmap();

    auto inputBlobData = minputHolder.as<T *>();
    for (size_t b = 0; b < batchSize; b++) {
        size_t iminfoSize = inputBlob->size() / batchSize;
        for (size_t i = 0; i < iminfoSize; i++) {
            size_t index = b * iminfoSize + i;
            if (0 == i)
                inputBlobData[index] = static_cast<T>(image_size.first);
            else if (1 == i)
                inputBlobData[index] = static_cast<T>(image_size.second);
            else
                inputBlobData[index] = 1;
        }
    }
}
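// Example (illustrative only): for a 1x3 NC blob with batchSize == 1 and
// image_size == {320, 240}, the blob is filled as {320, 240, 1}.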

/**
 * @brief Fill InferRequest blobs with random values or image information
 */
void fillBlobs(InferenceEngine::InferRequest inferRequest,
               const InferenceEngine::ConstInputsDataMap& inputsInfo,
               const size_t& batchSize) {
    std::vector<std::pair<size_t, size_t>> input_image_sizes;
    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
        if (isImage(item.second))
            input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
    }

    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
        Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
        if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
            // Fill image information
            auto image_size = input_image_sizes.at(0);
            if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
                fillBlobImInfo<float>(inputBlob, batchSize, image_size);
            } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
                fillBlobImInfo<short>(inputBlob, batchSize, image_size);
            } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
                fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
            } else {
                THROW_IE_EXCEPTION << "Input precision is not supported for image info!";
            }
            continue;
        }
        // Fill random
        if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
            fillBlobRandom<float>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
            fillBlobRandom<short>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
            fillBlobRandom<int32_t>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
            fillBlobRandom<uint8_t>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
            fillBlobRandom<int8_t>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
            fillBlobRandom<uint16_t>(inputBlob);
        } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
            fillBlobRandom<int16_t>(inputBlob);
        } else {
            THROW_IE_EXCEPTION << "Input precision is not supported for " << item.first;
        }
    }
}
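For reference, a minimal sketch of how these helpers chain together at runtime; the model path, device name, and surrounding main() below are illustrative assumptions rather than part of this commit:

// Illustrative usage of the helpers above; "model.xml" and "CPU" are placeholders.
#include <inference_engine.hpp>
#include "common.h"

int main() {
    InferenceEngine::Core ie;
    InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("model.xml");
    InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "CPU");
    InferenceEngine::InferRequest inferRequest = exeNetwork.CreateInferRequest();

    size_t batchSize = cnnNetwork.getBatchSize();
    batchSize = batchSize != 0 ? batchSize : 1;  // guard against an unset batch size
    const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
    fillBlobs(inferRequest, inputsInfo, batchSize);

    inferRequest.Infer();
    return 0;
}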
@@ -5,10 +5,12 @@
 #include <inference_engine.hpp>
 #include <iostream>
+
+#include "common.h"
 #include "timetests_helper/timer.h"
 #include "timetests_helper/utils.h"
 using namespace InferenceEngine;
 
 
 /**
  * @brief Function that contain executable pipeline which will be called from
  * main(). The function should not throw any exceptions and responsible for
@@ -17,6 +19,7 @@ using namespace InferenceEngine;
 int runPipeline(const std::string &model, const std::string &device) {
     auto pipeline = [](const std::string &model, const std::string &device) {
         Core ie;
+        CNNNetwork cnnNetwork;
         ExecutableNetwork exeNetwork;
         InferRequest inferRequest;
 
@@ -33,7 +36,6 @@ int runPipeline(const std::string &model, const std::string &device) {
             exeNetwork = ie.ImportNetwork(model, device);
         }
         else {
-            CNNNetwork cnnNetwork;
             {
                 SCOPED_TIMER(read_network);
                 cnnNetwork = ie.ReadNetwork(model);
@@ -50,6 +52,14 @@ int runPipeline(const std::string &model, const std::string &device) {
         {
             SCOPED_TIMER(first_inference);
             inferRequest = exeNetwork.CreateInferRequest();
+
+            {
+                SCOPED_TIMER(fill_inputs);
+                auto batchSize = cnnNetwork.getBatchSize();
+                batchSize = batchSize != 0 ? batchSize : 1;
+                const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+                fillBlobs(inferRequest, inputsInfo, batchSize);
+            }
             inferRequest.Infer();
         }
     };