diff --git a/tests/time_tests/src/timetests/common.h b/tests/time_tests/src/timetests/common.h
new file mode 100644
index 00000000000..555d1757962
--- /dev/null
+++ b/tests/time_tests/src/timetests/common.h
@@ -0,0 +1,154 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <inference_engine.hpp>
+
+#include <cstdlib>
+#include <utility>
+#include <vector>
+
+using namespace InferenceEngine;
+
+/**
+ * @brief Determine whether an InferenceEngine blob represents an image
+ */
+template <typename T>
+static bool isImage(const T &blob) {
+    auto descriptor = blob->getTensorDesc();
+    if (descriptor.getLayout() != InferenceEngine::NCHW) {
+        return false;
+    }
+    auto channels = descriptor.getDims()[1];
+    return channels == 3;
+}
+
+
+/**
+ * @brief Determine whether an InferenceEngine blob represents image information
+ */
+template <typename T>
+static bool isImageInfo(const T &blob) {
+    auto descriptor = blob->getTensorDesc();
+    if (descriptor.getLayout() != InferenceEngine::NC) {
+        return false;
+    }
+    auto channels = descriptor.getDims()[1];
+    return (channels >= 2);
+}
+
+
+/**
+ * @brief Return height and width from the provided InferenceEngine tensor description
+ */
+inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
+    const auto& layout = desc.getLayout();
+    const auto& dims = desc.getDims();
+    const auto& size = dims.size();
+    if ((size >= 2) &&
+        (layout == InferenceEngine::Layout::NCHW   ||
+         layout == InferenceEngine::Layout::NHWC   ||
+         layout == InferenceEngine::Layout::NCDHW  ||
+         layout == InferenceEngine::Layout::NDHWC  ||
+         layout == InferenceEngine::Layout::OIHW   ||
+         layout == InferenceEngine::Layout::GOIHW  ||
+         layout == InferenceEngine::Layout::OIDHW  ||
+         layout == InferenceEngine::Layout::GOIDHW ||
+         layout == InferenceEngine::Layout::CHW    ||
+         layout == InferenceEngine::Layout::HW)) {
+        // Regardless of layout, dimensions are stored in a fixed order
+        return std::make_pair(dims.back(), dims.at(size - 2));
+    } else {
+        THROW_IE_EXCEPTION << "Tensor does not have height and width dimensions";
+    }
+}
+
+
+/**
+ * @brief Fill an InferenceEngine blob with random values
+ */
+template <typename T>
+void fillBlobRandom(Blob::Ptr& inputBlob) {
+    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+    // The locked memory holder must stay alive for as long as its buffer is accessed
+    auto minputHolder = minput->wmap();
+
+    auto inputBlobData = minputHolder.as<T *>();
+    for (size_t i = 0; i < inputBlob->size(); i++) {
+        auto rand_max = RAND_MAX;
+        inputBlobData[i] = (T) rand() / static_cast<float>(rand_max) * 10;
+    }
+}
+
+
+/**
+ * @brief Fill an InferenceEngine blob with image information (height, width, scale)
+ */
+template <typename T>
+void fillBlobImInfo(Blob::Ptr& inputBlob,
+                    const size_t& batchSize,
+                    std::pair<size_t, size_t> image_size) {
+    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+    // The locked memory holder must stay alive for as long as its buffer is accessed
+    auto minputHolder = minput->wmap();
+
+    auto inputBlobData = minputHolder.as<T *>();
+    for (size_t b = 0; b < batchSize; b++) {
+        size_t iminfoSize = inputBlob->size() / batchSize;
+        for (size_t i = 0; i < iminfoSize; i++) {
+            size_t index = b * iminfoSize + i;
+            if (0 == i)
+                inputBlobData[index] = static_cast<T>(image_size.first);
+            else if (1 == i)
+                inputBlobData[index] = static_cast<T>(image_size.second);
+            else
+                inputBlobData[index] = 1;
+        }
+    }
+}
+
+
+/**
+ * @brief Fill InferRequest blobs with random values or image information
+ */
+void fillBlobs(InferenceEngine::InferRequest inferRequest,
+               const InferenceEngine::ConstInputsDataMap& inputsInfo,
+               const size_t& batchSize) {
+    // Collect the sizes of all image inputs first
+    std::vector<std::pair<size_t, size_t>> input_image_sizes;
+    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+        if (isImage(item.second))
+            input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
+    }
+
+    // Fill each input blob according to its precision
+    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+        Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
+        if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
+            // Fill image information
+            auto image_size = input_image_sizes.at(0);
+            if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+                fillBlobImInfo<float>(inputBlob, batchSize, image_size);
+            } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+                fillBlobImInfo<short>(inputBlob, batchSize, image_size);
+            } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+                fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
+            } else {
+                THROW_IE_EXCEPTION << "Input precision is not supported for image info!";
+            }
+            continue;
+        }
+        // Fill random
+        if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+            fillBlobRandom<float>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+            fillBlobRandom<short>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+            fillBlobRandom<int32_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
+            fillBlobRandom<uint8_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
+            fillBlobRandom<int8_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
+            fillBlobRandom<uint16_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
+            fillBlobRandom<int16_t>(inputBlob);
+        } else {
+            THROW_IE_EXCEPTION << "Input precision is not supported for " << item.first;
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/time_tests/src/timetests/timetest_infer.cpp b/tests/time_tests/src/timetests/timetest_infer.cpp
index 1ba0b24a492..fedb7349a51 100644
--- a/tests/time_tests/src/timetests/timetest_infer.cpp
+++ b/tests/time_tests/src/timetests/timetest_infer.cpp
@@ -5,10 +5,12 @@
 #include <inference_engine.hpp>
 #include <iostream>
 
+#include "common.h"
 #include "timetests_helper/timer.h"
 #include "timetests_helper/utils.h"
 using namespace InferenceEngine;
 
+
 /**
  * @brief Function that contain executable pipeline which will be called from
  * main(). The function should not throw any exceptions and responsible for
@@ -17,6 +19,7 @@ using namespace InferenceEngine;
 int runPipeline(const std::string &model, const std::string &device) {
   auto pipeline = [](const std::string &model, const std::string &device) {
     Core ie;
+    CNNNetwork cnnNetwork;
     ExecutableNetwork exeNetwork;
     InferRequest inferRequest;
 
@@ -33,7 +36,6 @@ int runPipeline(const std::string &model, const std::string &device) {
       exeNetwork = ie.ImportNetwork(model, device);
     }
     else {
-      CNNNetwork cnnNetwork;
       {
         SCOPED_TIMER(read_network);
         cnnNetwork = ie.ReadNetwork(model);
@@ -50,6 +52,14 @@ int runPipeline(const std::string &model, const std::string &device) {
     {
       SCOPED_TIMER(first_inference);
       inferRequest = exeNetwork.CreateInferRequest();
+
+      {
+        SCOPED_TIMER(fill_inputs);
+        auto batchSize = cnnNetwork.getBatchSize();
+        batchSize = batchSize != 0 ? batchSize : 1;
+        const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+        fillBlobs(inferRequest, inputsInfo, batchSize);
+      }
       inferRequest.Infer();
     }
   };