Moved stress tests to OV API 2.0 (#9791)

* Moved stress tests to OV API 2.0

* Fix for using output index instead of get_index

* Removed ov::runtime namespace in stress tests

* Updated stress tests according to latest changes in OV 2.0

* Fix memleaks tests

* Updated run_memcheck.py to process gtest_filter

* Updated fillTensors, added InferAPI1 and InferAPI2 classes

* Updated test_inference_with_streams

* Updated isImage, comments

* Updated fillTensors to fill image_info inputs with positive pseudo-random numbers

* Removed redundant variable in fillTensors
Lidia Toropova 2022-01-31 17:07:25 +03:00 committed by GitHub
parent 252b7d4728
commit 2f0bcc8542
27 changed files with 901 additions and 622 deletions
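For orientation, here is a minimal sketch of how the reworked stress-test pipelines are meant to drive both API versions through the wrapper added in this commit (InferApiBase / create_infer_api_wrapper). It is an illustration, not part of the diff: run_simple_inference is a hypothetical helper, and the model path and device name are placeholders.

#include "infer_api.h"  // InferApiBase, create_infer_api_wrapper (added by this commit)

// Hypothetical driver: the same test body runs against IE (API 1) and OV 2.0 (API 2).
void run_simple_inference(const std::string &model, const std::string &device, int api_version) {
    auto wrapper = create_infer_api_wrapper(api_version);  // picks InferAPI1 or InferAPI2
    wrapper->read_network(model);       // ReadNetwork() vs. ov::Core::read_model()
    wrapper->load_network(device);      // LoadNetwork() vs. compile_model()
    wrapper->create_infer_request();
    wrapper->prepare_input();           // fillBlobs() vs. fillTensors()
    wrapper->infer();
}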

View File

@@ -5,54 +5,84 @@
 #include "common_utils.h"
 
 #include <inference_engine.hpp>
+#include <openvino/openvino.hpp>
 
-using namespace InferenceEngine;
 
 /**
  * @brief Fill InferRequest blobs with random values or image information
  */
 void fillBlobs(InferenceEngine::InferRequest inferRequest,
-               const InferenceEngine::ConstInputsDataMap& inputsInfo,
-               const size_t& batchSize) {
+               const InferenceEngine::ConstInputsDataMap &inputsInfo,
+               const size_t &batchSize) {
     std::vector<std::pair<size_t, size_t>> input_image_sizes;
-    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+    for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) {
         if (isImage(item.second))
             input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
     }
 
-    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
-        Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
+    for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) {
+        InferenceEngine::Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
         if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
             // Fill image information
             auto image_size = input_image_sizes.at(0);
             if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
                 fillBlobImInfo<float>(inputBlob, batchSize, image_size);
             } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
                 fillBlobImInfo<short>(inputBlob, batchSize, image_size);
             } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
                 fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
+            } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
+                fillBlobImInfo<uint8_t>(inputBlob, batchSize, image_size);
             } else {
                 throw std::logic_error("Input precision is not supported for image info!");
             }
             continue;
         }
         // Fill random
         if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
             fillBlobRandom<float>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
             fillBlobRandom<short>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
             fillBlobRandom<int32_t>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
             fillBlobRandom<uint8_t>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
             fillBlobRandom<int8_t>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
             fillBlobRandom<uint16_t>(inputBlob);
         } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
             fillBlobRandom<int16_t>(inputBlob);
         } else {
             throw std::logic_error("Input precision is not supported for " + item.first);
         }
     }
 }
+
+/**
+ * @brief Fill infer_request tensors with random values or image information
+ */
+void fillTensors(ov::InferRequest &infer_request, const std::vector<ov::Output<ov::Node>> &inputs) {
+    for (size_t i = 0; i < inputs.size(); ++i) {
+        ov::Tensor input_tensor;
+        if (inputs[i].get_element_type() == ov::element::f32) {
+            input_tensor = fillTensorRandom<float>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::f16) {
+            input_tensor = fillTensorRandom<short>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::i32) {
+            input_tensor = fillTensorRandom<int32_t>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::u8) {
+            input_tensor = fillTensorRandom<uint8_t>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::i8) {
+            input_tensor = fillTensorRandom<int8_t>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::u16) {
+            input_tensor = fillTensorRandom<uint16_t>(inputs[i]);
+        } else if (inputs[i].get_element_type() == ov::element::i16) {
+            input_tensor = fillTensorRandom<int16_t>(inputs[i]);
+        } else {
+            throw std::logic_error(
+                    "Input precision is not supported for " + inputs[i].get_element_type().get_type_name());
+        }
+        infer_request.set_input_tensor(i, input_tensor);
+    }
+}

View File

@@ -5,103 +5,118 @@
 #pragma once
 
 #include <inference_engine.hpp>
+#include <limits>
 
-using namespace InferenceEngine;
 
 /**
- * @brief Determine if InferenceEngine blob means image or not
+ * @brief Determine if InferenceEngine blob means image or not (OV API 1.0)
  */
 template<typename T>
 static bool isImage(const T &blob) {
     auto descriptor = blob->getTensorDesc();
     if (descriptor.getLayout() != InferenceEngine::NCHW) {
         return false;
     }
     auto channels = descriptor.getDims()[1];
     return channels == 3;
 }
 
 /**
- * @brief Determine if InferenceEngine blob means image information or not
+ * @brief Determine if InferenceEngine blob means image information or not (OV API 1.0)
  */
 template<typename T>
 static bool isImageInfo(const T &blob) {
     auto descriptor = blob->getTensorDesc();
     if (descriptor.getLayout() != InferenceEngine::NC) {
         return false;
     }
     auto channels = descriptor.getDims()[1];
     return (channels >= 2);
 }
 
 /**
- * @brief Return height and width from provided InferenceEngine tensor description
+ * @brief Return height and width from provided InferenceEngine tensor description (OV API 1)
  */
-inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
-    const auto& layout = desc.getLayout();
-    const auto& dims = desc.getDims();
-    const auto& size = dims.size();
+inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc &desc) {
+    const auto &layout = desc.getLayout();
+    const auto &dims = desc.getDims();
+    const auto &size = dims.size();
     if ((size >= 2) &&
         (layout == InferenceEngine::Layout::NCHW ||
          layout == InferenceEngine::Layout::NHWC ||
         layout == InferenceEngine::Layout::NCDHW ||
         layout == InferenceEngine::Layout::NDHWC ||
         layout == InferenceEngine::Layout::OIHW ||
         layout == InferenceEngine::Layout::GOIHW ||
         layout == InferenceEngine::Layout::OIDHW ||
         layout == InferenceEngine::Layout::GOIDHW ||
         layout == InferenceEngine::Layout::CHW ||
         layout == InferenceEngine::Layout::HW)) {
         // Regardless of layout, dimensions are stored in fixed order
         return std::make_pair(dims.back(), dims.at(size - 2));
     } else {
         throw std::logic_error("Tensor does not have height and width dimensions");
     }
 }
 
 /**
  * @brief Fill InferenceEngine blob with random values
  */
 template<typename T>
-void fillBlobRandom(Blob::Ptr& inputBlob) {
-    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+void fillBlobRandom(InferenceEngine::Blob::Ptr &inputBlob) {
+    auto minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(inputBlob);
     // locked memory holder should be alive all time while access to its buffer happens
     auto minputHolder = minput->wmap();
 
     auto inputBlobData = minputHolder.as<T *>();
     for (size_t i = 0; i < inputBlob->size(); i++) {
         auto rand_max = RAND_MAX;
         inputBlobData[i] = (T) rand() / static_cast<T>(rand_max) * 10;
     }
 }
 
+/**
+ * @brief Fill InferenceEngine tensor with random values (OV API 2.0)
+ */
+template<typename T>
+ov::Tensor fillTensorRandom(const ov::Output<ov::Node> &input) {
+    ov::Tensor tensor{input.get_element_type(), input.get_shape()};
+    std::vector<T> values(ov::shape_size(input.get_shape()));
+
+    for (size_t i = 0; i < values.size(); ++i) {
+        values[i] = 1 + static_cast <T> (rand()) / (static_cast <T> (RAND_MAX / (std::numeric_limits<T>::max() - 1)));
+    }
+
+    std::memcpy(tensor.data(), values.data(), sizeof(T) * values.size());
+
+    return tensor;
+}
 
 /**
- * @brief Fill InferenceEngine blob with image information
+ * @brief Fill InferenceEngine blob with image information (OV API 1.0)
  */
 template<typename T>
-void fillBlobImInfo(Blob::Ptr& inputBlob,
-                    const size_t& batchSize,
+void fillBlobImInfo(InferenceEngine::Blob::Ptr &inputBlob,
+                    const size_t &batchSize,
                     std::pair<size_t, size_t> image_size) {
-    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+    InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(inputBlob);
     // locked memory holder should be alive all time while access to its buffer happens
     auto minputHolder = minput->wmap();
 
     auto inputBlobData = minputHolder.as<T *>();
     for (size_t b = 0; b < batchSize; b++) {
         size_t iminfoSize = inputBlob->size() / batchSize;
         for (size_t i = 0; i < iminfoSize; i++) {
             size_t index = b * iminfoSize + i;
             if (0 == i)
                 inputBlobData[index] = static_cast<T>(image_size.first);
             else if (1 == i)
                 inputBlobData[index] = static_cast<T>(image_size.second);
             else
                 inputBlobData[index] = 1;
         }
     }
 }
@@ -109,5 +124,11 @@ void fillBlobImInfo(Blob::Ptr& inputBlob,
 /**
  * @brief Fill InferRequest blobs with random values or image information
  */
 void fillBlobs(InferenceEngine::InferRequest inferRequest,
-               const InferenceEngine::ConstInputsDataMap& inputsInfo,
-               const size_t& batchSize);
+               const InferenceEngine::ConstInputsDataMap &inputsInfo,
+               const size_t &batchSize);
+
+/**
+ * @brief Fill InferRequest tensors with random values or image information
+ */
+void fillTensors(ov::InferRequest &infer_request,
+                 const std::vector<ov::Output<ov::Node>> &inputs);
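As a usage illustration (not part of the commit), the OV 2.0 helpers above would typically be combined as follows; the model path and device are placeholders and error handling is omitted:

#include <openvino/openvino.hpp>
#include "common_utils.h"  // fillTensors / fillTensorRandom declared above

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");        // placeholder path
    auto compiled = core.compile_model(model, "CPU");  // placeholder device
    auto request = compiled.create_infer_request();
    fillTensors(request, model->inputs());             // fill every input with pseudo-random data
    request.infer();
    return 0;
}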

View File

@@ -3,6 +3,10 @@
         <value>CPU</value>
         <value>GPU</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
     <models>
         <!--Models with FP32 precision-->
         <model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />

View File

@@ -2,6 +2,10 @@
     <devices>
         <value>MYRIAD</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
    <models>
        <!--Models with FP32 precision-->
        <model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />

View File

@@ -3,6 +3,10 @@
         <value>CPU</value>
         <value>GPU</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
     <models>
         <!--Models with FP32 precision-->
         <model name="vgg16" precision="FP32" source="omz" />

View File

@@ -3,6 +3,10 @@
         <value>CPU</value>
         <value>GPU</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
     <models>
         <!--Models with FP32 precision-->
         <model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />

View File

@@ -14,6 +14,10 @@
         <value>CPU</value>
         <value>GPU</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
     <models>
         <model name="alexnet" precision="FP32" source="omz" />
         <model name="mobilenet-ssd" precision="FP32" source="omz" />

View File

@@ -13,6 +13,10 @@
     <devices>
         <value>MYRIAD</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
    <models>
        <model name="alexnet" precision="FP32" source="omz" />
        <model name="mobilenet-ssd" precision="FP32" source="omz" />

View File

@@ -15,6 +15,10 @@
         <value>CPU</value>
         <value>GPU</value>
     </devices>
+    <api_versions>
+        <value>1</value>
+        <value>2</value>
+    </api_versions>
     <models>
         <model name="alexnet" precision="FP32" source="omz" />
         <model name="mobilenet-ssd" precision="FP32" source="omz" />

View File

@@ -10,162 +10,120 @@
 #include <string>
 
 #include <inference_engine.hpp>
+#include <openvino/openvino.hpp>
 
-using namespace InferenceEngine;
 
-std::function<void()> load_unload_plugin(const std::string &target_device) {
+std::function<void()> load_unload_plugin(const std::string &target_device, const int &api_version) {
     return [&] {
-        Core ie;
-        // GetVersions silently register plugin in `plugins` through `GetCPPPluginByName`
-        ie.GetVersions(target_device);
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        // get_versions silently register plugin in `plugins` through `GetCPPPluginByName`
+        ie_api_wrapper->load_plugin(target_device);
         // Remove plugin for target_device from `plugins`
-        ie.UnregisterPlugin(target_device);
+        ie_api_wrapper->unload_plugin(target_device);
     };
 }
 
-std::function<void()> read_cnnnetwork(const std::string &model) {
+std::function<void()> read_cnnnetwork(const std::string &model, const int &api_version) {
     return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
     };
 }
 
-std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model) {
+std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version) {
     return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        const InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-        ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-        bool doReshape = false;
-        for (const InputsDataMap::value_type& input : inputInfo) {
-            int batchIndex = -1;
-            auto layout = input.second->getTensorDesc().getLayout();
-            if ((layout == Layout::NCHW) || (layout == Layout::NCDHW) ||
-                (layout == Layout::NHWC) || (layout == Layout::NDHWC) ||
-                (layout == Layout::NC)) {
-                batchIndex = 0;
-            } else if (layout == CN) {
-                batchIndex = 1;
-            }
-            if (batchIndex != -1) {
-                shapes[input.first][batchIndex] *= 2;
-                doReshape = true;
-            }
-        }
-        if (doReshape)
-            cnnNetwork.reshape(shapes);
-        else
-            throw std::logic_error("Reshape wasn't applied for a model.");
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->change_batch_size(2, iter);
     };
 }
 
-std::function<void()> set_input_params(const std::string &model) {
+std::function<void()> set_input_params(const std::string &model, const int &api_version) {
     return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-        for (auto &input : inputInfo) {
-            input.second->getPreProcess().setResizeAlgorithm(NO_RESIZE);
-            input.second->setPrecision(Precision::U8);
-            if (input.second->getInputData()->getTensorDesc().getDims().size() == 4)
-                input.second->setLayout(Layout::NCHW);
-            else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2)
-                input.second->setLayout(Layout::NC);
-            else
-                throw std::logic_error("Setting of input parameters wasn't applied for a model.");
-        }
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->set_input_params(model);
     };
 }
 
-std::function<void()> create_exenetwork(const std::string &model, const std::string &target_device) {
+std::function<void()>
+create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version) {
     return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->load_network(target_device);
     };
 }
 
-std::function<void()> recreate_exenetwork(Core &ie, const std::string &model, const std::string &target_device) {
+std::function<void()> recreate_compiled_model(std::shared_ptr<InferApiBase> &ie_wrapper, const std::string &model,
+                                              const std::string &target_device, const int &api_version) {
     return [&] {
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    };
-}
-
-std::function<void()> create_infer_request(const std::string &model, const std::string &target_device) {
-    return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-        InferRequest infer_request = exeNetwork.CreateInferRequest();
+        ie_wrapper->load_plugin(target_device);
+        ie_wrapper->read_network(model);
+        ie_wrapper->load_network(target_device);
     };
 }
 
-std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork) {
+std::function<void()>
+create_infer_request(const std::string &model, const std::string &target_device, const int &api_version) {
     return [&] {
-        InferRequest infer_request = exeNetwork.CreateInferRequest();
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->load_network(target_device);
+        ie_api_wrapper->create_infer_request();
     };
 }
 
-std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device) {
+std::function<void()> recreate_infer_request(std::shared_ptr<InferApiBase> &ie_wrapper) {
     return [&] {
-        Core ie;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-        InferRequest infer_request = exeNetwork.CreateInferRequest();
-
-        auto batchSize = cnnNetwork.getBatchSize();
-        batchSize = batchSize != 0 ? batchSize : 1;
-        const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-        fillBlobs(infer_request, inputsInfo, batchSize);
-
-        infer_request.Infer();
-        OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-        for (auto &output : output_info)
-            Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+        ie_wrapper->create_infer_request();
     };
 }
 
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) {
+std::function<void()>
+infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version) {
     return [&] {
-        infer_request.Infer();
-        for (auto &output : output_info)
-            Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->load_network(target_device);
+        ie_api_wrapper->create_infer_request();
+        ie_api_wrapper->prepare_input();
+        ie_api_wrapper->infer();
     };
 }
 
-std::function<void()> inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams) {
+std::function<void()> reinfer_request_inference(std::shared_ptr<InferApiBase> &ie_wrapper) {
     return [&] {
-        std::map<std::string, std::string> config;
-        config[target_device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams);
-
-        Core ie;
-        ie.GetVersions(target_device);
-        ie.SetConfig(config, target_device);
-
-        InferRequest inferRequest;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-        auto batchSize = cnnNetwork.getBatchSize();
-        batchSize = batchSize != 0 ? batchSize : 1;
-        const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+        ie_wrapper->infer();
+    };
+}
 
+std::function<void()>
+inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams,
+                       const int &api_version) {
+    return [&] {
         unsigned int nireq = nstreams;
+        auto ie_api_wrapper = create_infer_api_wrapper(api_version);
+        ie_api_wrapper->load_plugin(target_device);
+        ie_api_wrapper->set_config(target_device, "THROUGHPUT_STREAMS", nstreams);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->load_network(target_device);
         try {
-            nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
+            nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
         } catch (const std::exception &ex) {
             log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
         }
 
         for (int counter = 0; counter < nireq; counter++) {
-            inferRequest = exeNetwork.CreateInferRequest();
-            fillBlobs(inferRequest, inputsInfo, batchSize);
-
-            inferRequest.Infer();
-            OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-            for (auto &output : output_info)
-                Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
+            ie_api_wrapper->create_infer_request();
+            ie_api_wrapper->prepare_input();
+            ie_api_wrapper->infer();
         }
     };
 }
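For context (not part of the diff), each factory above returns a std::function<void()> that the stress loops invoke repeatedly; a minimal, hypothetical loop, where model, device and numiters come from the test parameters, could look like this:

// Hypothetical stress loop over one of the pipelines above.
for (int i = 0; i < numiters; i++) {
    // Each call builds and immediately runs one full pipeline:
    // read -> compile -> create request -> fill inputs -> infer.
    infer_request_inference(model, device, /*api_version=*/2)();
}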

View File

@@ -5,15 +5,33 @@
 #include <string>
 #include <functional>
 
 #include <inference_engine.hpp>
+
+#include "../infer_api/infer_api.h"
 
-std::function<void()> load_unload_plugin(const std::string &target_device);
-std::function<void()> read_cnnnetwork(const std::string &model);
-std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model);
-std::function<void()> set_input_params(const std::string &model);
-std::function<void()> create_exenetwork(const std::string &model, const std::string &target_device);
-std::function<void()> recreate_exenetwork(InferenceEngine::Core &ie, const std::string &model, const std::string &target_device);
-std::function<void()> create_infer_request(const std::string &model, const std::string &target_device);
-std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork);
-std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info);
-std::function<void()> inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams);
+std::function<void()> load_unload_plugin(const std::string &target_device, const int &api_version);
+
+std::function<void()> read_cnnnetwork(const std::string &model, const int &api_version);
+
+std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version);
+
+std::function<void()> set_input_params(const std::string &model, const int &api_version);
+
+std::function<void()>
+create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version);
+
+std::function<void()>
+create_infer_request(const std::string &model, const std::string &target_device, const int &api_version);
+
+std::function<void()>
+infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version);
+
+std::function<void()>
+inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams,
+                       const int &api_version);
+
+std::function<void()>
+recreate_compiled_model(std::shared_ptr<InferApiBase> &ie, const std::string &model, const std::string &target_device,
+                        const int &api_version);
+
+std::function<void()> recreate_infer_request(std::shared_ptr<InferApiBase> &ie_wrapper);
+
+std::function<void()> reinfer_request_inference(std::shared_ptr<InferApiBase> &ie_wrapper);

View File

@@ -0,0 +1,188 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "infer_api.h"
#include "openvino/core/preprocess/pre_post_process.hpp"

InferAPI1::InferAPI1() = default;

void InferAPI1::load_plugin(const std::string &device) {
    ie.GetVersions(device);
}

void InferAPI1::unload_plugin(const std::string &device) {
    ie.UnregisterPlugin(device);
}

void InferAPI1::read_network(const std::string &model) {
    cnnNetwork = ie.ReadNetwork(model);
    inputsInfo = cnnNetwork.getInputsInfo();

    InferenceEngine::ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
    for (const auto &input: inputsInfo) {
        original_batch_size = shapes[input.first][0];
    }
    original_batch_size = original_batch_size ? original_batch_size : 1;
}

void InferAPI1::load_network(const std::string &device) {
    exeNetwork = ie.LoadNetwork(cnnNetwork, device);
}

void InferAPI1::create_infer_request() {
    inferRequest = exeNetwork.CreateInferRequest();
}

void InferAPI1::prepare_input() {
    auto batchSize = cnnNetwork.getBatchSize();
    batchSize = batchSize != 0 ? batchSize : 1;
    fillBlobs(inferRequest, exeNetwork.GetInputsInfo(), batchSize);
}

void InferAPI1::infer() {
    inferRequest.Infer();
    for (auto &output: outputInfo) {
        InferenceEngine::Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
    }
}

void InferAPI1::change_batch_size(int multiplier, int cur_iter) {
    bool doReshape = false;
    auto shapes = cnnNetwork.getInputShapes();
    int new_batch_size = ((cur_iter % 2) == 0) ? original_batch_size * multiplier : original_batch_size;
    for (const auto &input: inputsInfo) {
        int batchIndex = -1;
        auto layout = input.second->getTensorDesc().getLayout();
        if ((layout == InferenceEngine::Layout::NCHW) || (layout == InferenceEngine::Layout::NCDHW) ||
            (layout == InferenceEngine::Layout::NHWC) || (layout == InferenceEngine::Layout::NDHWC) ||
            (layout == InferenceEngine::Layout::NC)) {
            batchIndex = 0;
        } else if (layout == InferenceEngine::CN) {
            batchIndex = 1;
        }
        if (batchIndex != -1) {
            shapes[input.first][batchIndex] = new_batch_size;
            doReshape = true;
        }
    }
    if (doReshape)
        cnnNetwork.reshape(shapes);
    else
        throw std::logic_error("Reshape wasn't applied for a model.");
}

void InferAPI1::set_input_params(const std::string &model) {
    cnnNetwork = ie.ReadNetwork(model);
    InferenceEngine::InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
    for (auto &input: inputInfo) {
        input.second->getPreProcess().setResizeAlgorithm(InferenceEngine::NO_RESIZE);
        input.second->setPrecision(InferenceEngine::Precision::U8);
        if (input.second->getInputData()->getTensorDesc().getDims().size() == 4)
            input.second->setLayout(InferenceEngine::Layout::NCHW);
        else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2)
            input.second->setLayout(InferenceEngine::Layout::NC);
        else
            throw std::logic_error("Setting of input parameters wasn't applied for a model.");
    }
}

void InferAPI1::set_config(const std::string &device, const std::string &property, int nstreams) {
    config[device + "_" + property] = std::to_string(nstreams);
    ie.SetConfig(config, device);
}

unsigned int InferAPI1::get_property(const std::string &name) {
    return exeNetwork.GetMetric(name).as<unsigned int>();
}

InferAPI2::InferAPI2() = default;

void InferAPI2::load_plugin(const std::string &device) {
    ie.get_versions(device);
}

void InferAPI2::unload_plugin(const std::string &device) {
    ie.unload_plugin(device);
}

void InferAPI2::read_network(const std::string &model) {
    network = ie.read_model(model);
    inputs = network->inputs();

    for (const auto &input: inputs) {
        auto tensor_shape = input.get_shape();
        original_batch_size = tensor_shape[0];
        original_batch_size = original_batch_size ? original_batch_size : 1;
    }
}

void InferAPI2::load_network(const std::string &device) {
    compiled_model = ie.compile_model(network, device);
}

void InferAPI2::create_infer_request() {
    infer_request = compiled_model.create_infer_request();
}

void InferAPI2::prepare_input() {
    fillTensors(infer_request, inputs);
}

void InferAPI2::infer() {
    infer_request.infer();
    for (size_t i = 0; i < outputs.size(); ++i) {
        const auto &output_tensor = infer_request.get_output_tensor(i);
    }
}

void InferAPI2::change_batch_size(int multiplier, int cur_iter) {
    int new_batch_size = ((cur_iter % 2) == 0) ? original_batch_size * multiplier : original_batch_size;
    for (auto &input: inputs) {
        auto tensor_shape = input.get_shape();
        tensor_shape[0] = new_batch_size;
        network->reshape({{input.get_any_name(), tensor_shape}});
    }
}

void InferAPI2::set_config(const std::string &device, const std::string &property, int nstreams) {
    config[device + "_" + property] = std::to_string(nstreams);
    ie.set_property(device, config);
}

unsigned int InferAPI2::get_property(const std::string &name) {
    return compiled_model.get_property(name).as<unsigned int>();
}

void InferAPI2::set_input_params(const std::string &model) {
    network = ie.read_model(model);
    inputs = network->inputs();
    auto ppp = ov::preprocess::PrePostProcessor(network);
    for (size_t i = 0; i < inputs.size(); ++i) {
        auto &input_info = ppp.input(i);
        if (inputs[i].get_shape().size() == 4) {
            input_info.tensor().set_element_type(ov::element::u8).set_layout("NCHW");
            input_info.model().set_layout("NCHW");
            ppp.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
        } else if (inputs[i].get_shape().size() == 2) {
            input_info.tensor().set_element_type(ov::element::u8).set_layout("NC");
            input_info.model().set_layout("NC");
        } else {
            throw std::logic_error("Setting of input parameters wasn't applied for a model.");
        }
    }
    network = ppp.build();
    inputs = network->inputs();
}

std::shared_ptr<InferApiBase> create_infer_api_wrapper(const int &api_version) {
    if (api_version == 1) {
        return std::make_shared<InferAPI1>(InferAPI1());
    } else if (api_version == 2) {
        return std::make_shared<InferAPI2>(InferAPI2());
    } else {
        throw std::logic_error("Unsupported API version");
    }
}

View File

@@ -0,0 +1,111 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <inference_engine.hpp>
#include "openvino/runtime/core.hpp"
#include "common_utils.h"

class InferApiBase {
public:
    virtual void load_plugin(const std::string &device) = 0;
    virtual void unload_plugin(const std::string &device) = 0;
    virtual void read_network(const std::string &model) = 0;
    virtual void load_network(const std::string &device) = 0;
    virtual void create_infer_request() = 0;
    virtual void infer() = 0;
    virtual void prepare_input() = 0;
    virtual void change_batch_size(int multiplier, int cur_iter) = 0;
    virtual void set_input_params(const std::string &model) = 0;
    virtual void set_config(const std::string &device, const std::string &property, int nstreams) = 0;
    virtual unsigned int get_property(const std::string &name) = 0;
};

class InferAPI1 : public InferApiBase {
public:
    InferAPI1();

    void load_plugin(const std::string &device) override;
    void unload_plugin(const std::string &device) override;
    void read_network(const std::string &model) override;
    void load_network(const std::string &device) override;
    void create_infer_request() override;
    void prepare_input() override;
    void infer() override;
    void change_batch_size(int multiplier, int cur_iter) override;
    void set_input_params(const std::string &model) override;
    void set_config(const std::string &device, const std::string &property, int nstreams) override;
    unsigned int get_property(const std::string &name) override;

private:
    InferenceEngine::Core ie;
    InferenceEngine::CNNNetwork cnnNetwork;
    InferenceEngine::ExecutableNetwork exeNetwork;
    InferenceEngine::InferRequest inferRequest;
    InferenceEngine::InputsDataMap inputsInfo;
    InferenceEngine::OutputsDataMap outputInfo;
    int original_batch_size;
    std::map<std::string, std::string> config;
};

class InferAPI2 : public InferApiBase {
public:
    InferAPI2();

    void load_plugin(const std::string &device) override;
    void unload_plugin(const std::string &device) override;
    void read_network(const std::string &model) override;
    void load_network(const std::string &device) override;
    void create_infer_request() override;
    void prepare_input() override;
    void infer() override;
    void change_batch_size(int multiplier, int cur_iter) override;
    void set_input_params(const std::string &model) override;
    void set_config(const std::string &device, const std::string &property, int nstreams) override;
    unsigned int get_property(const std::string &name) override;

private:
    ov::Core ie;
    std::shared_ptr<ov::Model> network;
    ov::CompiledModel compiled_model;
    ov::InferRequest infer_request;
    std::vector<ov::Output<ov::Node>> inputs;
    std::vector<ov::Output<ov::Node>> outputs;
    int original_batch_size;
    std::map<std::string, ov::Any> config;
};

std::shared_ptr<InferApiBase> create_infer_api_wrapper(const int &api_version);

View File

@@ -19,28 +19,32 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
     std::vector<TestCase> tests_cases;
     const pugi::xml_document &test_config = Environment::Instance().getTestConfig();
 
-    std::vector<int> processes, threads, iterations;
+    std::vector<int> processes, threads, iterations, api_versions;
     std::vector<std::string> devices, models, models_names, precisions;
 
     pugi::xml_node values;
-    for (auto field = fields.begin(); field != fields.end(); field++) {
-        if (*field == "processes") {
+    for (const auto &field: fields) {
+        if (field == "processes") {
             values = test_config.child("attributes").child("processes");
             for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
                 processes.push_back(val.text().as_int());
-        } else if (*field == "threads") {
+        } else if (field == "threads") {
             values = test_config.child("attributes").child("threads");
             for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
                 threads.push_back(val.text().as_int());
-        } else if (*field == "iterations") {
+        } else if (field == "iterations") {
             values = test_config.child("attributes").child("iterations");
             for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
                 iterations.push_back(val.text().as_int());
-        } else if (*field == "devices") {
+        } else if (field == "devices") {
             values = test_config.child("attributes").child("devices");
             for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
-                devices.push_back(val.text().as_string());
-        } else if (*field == "models") {
+                devices.emplace_back(val.text().as_string());
+        } else if (field == "api_versions") {
+            values = test_config.child("attributes").child("api_versions");
+            for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
+                api_versions.push_back(val.text().as_int());
+        } else if (field == "models") {
             values = test_config.child("attributes").child("models");
             for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) {
                 std::string full_path = val.attribute("full_path").as_string();
@@ -62,18 +66,20 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
     processes = !processes.empty() ? processes : std::vector<int>{1};
     threads = !threads.empty() ? threads : std::vector<int>{1};
     iterations = !iterations.empty() ? iterations : std::vector<int>{1};
+    api_versions = !api_versions.empty() ? api_versions : std::vector<int>{1, 2};
     devices = !devices.empty() ? devices : std::vector<std::string>{"NULL"};
     models = !models.empty() ? models : std::vector<std::string>{"NULL"};
     precisions = !precisions.empty() ? precisions : std::vector<std::string>{"NULL"};
     models_names = !models_names.empty() ? models_names : std::vector<std::string>{"NULL"};
 
-    for (auto &numprocesses : processes)
-        for (auto &numthreads : threads)
-            for (auto &numiters : iterations)
-                for (auto &device : devices)
-                    for (int i = 0; i < models.size(); i++)
-                        tests_cases.push_back(TestCase(numprocesses, numthreads, numiters, device, models[i],
-                                                       models_names[i], precisions[i]));
+    for (auto &numprocesses: processes)
+        for (auto &numthreads: threads)
+            for (auto &numiters: iterations)
+                for (auto &api_version: api_versions)
+                    for (auto &device: devices)
+                        for (int i = 0; i < models.size(); i++)
+                            tests_cases.emplace_back(numprocesses, numthreads, numiters, api_version, device, models[i],
+                                                     models_names[i], precisions[i]);
 
     return tests_cases;
 }
@@ -93,6 +99,7 @@ std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks() {
         numprocesses = device.attribute("processes").as_int(1);
         numthreads = device.attribute("threads").as_int(1);
         numiterations = device.attribute("iterations").as_int(1);
+        std::vector<int> api_versions{1, 2};
 
         std::vector<std::map<std::string, std::string>> models;
@@ -104,13 +111,15 @@ std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks() {
                         "One of the 'model' records from test config doesn't contain 'full_path' or 'path' attributes");
             std::string name = model.attribute("name").as_string();
             std::string precision = model.attribute("precision").as_string();
             std::map<std::string, std::string> model_map{{"name", name},
                                                          {"path", path},
                                                          {"full_path", full_path},
                                                          {"precision", precision}};
             models.push_back(model_map);
         }
-        tests_cases.push_back(MemLeaksTestCase(numprocesses, numthreads, numiterations, device_name, models));
+        for (auto api_version: api_versions) {
+            tests_cases.emplace_back(numprocesses, numthreads, numiterations, api_version, device_name, models);
+        }
     }
 
     return tests_cases;
@@ -124,15 +133,16 @@ std::string getTestCaseNameMemLeaks(const testing::TestParamInfo<MemLeaksTestCas
     return obj.param.test_case_name;
 }
 
-void test_wrapper(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
-    tests_pipeline(params.model, params.device, params.numiters);
+void test_wrapper(const std::function<void(std::string, std::string, int, int)> &tests_pipeline,
+                  const TestCase &params) {
+    tests_pipeline(params.model, params.device, params.numiters, params.api_version);
 }
 
-void _runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
+void _runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params) {
     run_in_threads(params.numthreads, test_wrapper, tests_pipeline, params);
 }
 
-void runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
+void runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params) {
 #if DEBUG_MODE
     tests_pipeline(params.model, params.device, params.numiters);
 #else

View File

@@ -13,7 +13,9 @@
 #include <vector>
 
-enum TestStatus { TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK };
+enum TestStatus {
+    TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK
+};
 
 using TestResult = std::pair<TestStatus, std::string>;
@@ -22,6 +24,7 @@ public:
     int numprocesses;
     int numthreads;
     int numiters;
+    int api_version;
     std::string precision;
     std::string test_case_name;
     std::string model_name;
@@ -29,10 +32,10 @@ public:
 protected:
     // Replace non-alphabetic/numeric symbols with "_" to prevent logging errors
-    std::string update_item_for_name(const std::string &item) {
+    static std::string update_item_for_name(const std::string &item) {
         std::string _item(item);
-        for (std::string::size_type index = 0; index < _item.size(); ++index) {
-            if (!isalnum(_item[index]) && _item[index] != '_') _item[index] = '_';
+        for (char &index: _item) {
+            if (!isalnum(index) && index != '_') index = '_';
         }
         return _item;
     }
@@ -42,13 +45,15 @@ class TestCase : public TestCaseBase {
 public:
     std::string model;
 
-    TestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device, const std::string &_model,
+    TestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device,
+             const std::string &_model,
              const std::string &_model_name, const std::string &_precision) {
-        numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device, model = _model,
-        model_name = _model_name, precision = _precision;
+        numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version,
+        device = _device, model = _model, model_name = _model_name, precision = _precision;
         test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) +
                          "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) +
-                         "_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name);
+                         "_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name)
+                         + "_API_" + std::to_string(api_version);
     }
 };
@@ -56,12 +61,13 @@ class MemLeaksTestCase : public TestCaseBase {
 public:
     std::vector<std::map<std::string, std::string>> models;
 
-    MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device,
+    MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device,
                      std::vector<std::map<std::string, std::string>> _models) {
-        numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device,
-        models = _models;
+        numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version,
+        device = _device, models = _models;
         test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) +
-                         "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device);
+                         "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) + "_API_" +
+                         std::to_string(api_version);
         for (int i = 0; i < models.size(); i++) {
             test_case_name += "_Model" + std::to_string(i + 1) + "_" + update_item_for_name(models[i]["name"]) + "_" +
                               update_item_for_name(models[i]["precision"]);
@@ -76,7 +82,9 @@ private:
     bool _collect_results_only = false;
 
     Environment() = default;
     Environment(const Environment &) = delete;
     Environment &operator=(const Environment &) = delete;
 
 public:
@@ -86,14 +94,21 @@ public:
     }
 
     const pugi::xml_document &getTestConfig();
     void setTestConfig(const pugi::xml_document &test_config);
 };
 
 std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> items);
 std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks();
 
 std::string getTestCaseName(const testing::TestParamInfo<TestCase> &obj);
 std::string getTestCaseNameMemLeaks(const testing::TestParamInfo<MemLeaksTestCase> &obj);
 
-void runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
-void _runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
-void test_wrapper(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
+void runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params);
+
+void _runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params);
+
+void test_wrapper(const std::function<void(std::string, std::string, int, int)> &tests_pipeline,
+                  const TestCase &params);

View File

@ -4,6 +4,7 @@
#include "tests_utils.h" #include "tests_utils.h"
#include "../common/tests_utils.h" #include "../common/tests_utils.h"
#include "../common/infer_api/infer_api.h"
#include "common_utils.h" #include "common_utils.h"
#include "../common/managers/thread_manager.h" #include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h" #include "tests_pipelines/tests_pipelines.h"
@ -11,8 +12,7 @@
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <inference_engine.hpp> #include <inference_engine.hpp>
#include <openvino/runtime/core.hpp>
using namespace InferenceEngine;
class MemCheckTestSuite : public ::testing::TestWithParam<TestCase> { class MemCheckTestSuite : public ::testing::TestWithParam<TestCase> {
@ -21,11 +21,10 @@ public:
TestReferences test_refs; TestReferences test_refs;
void SetUp() override { void SetUp() override {
const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); const ::testing::TestInfo *const test_info = ::testing::UnitTest::GetInstance()->current_test_info();
test_name = std::string(test_info->name()).substr(0, std::string(test_info->name()).find('/')); test_name = std::string(test_info->name()).substr(0, std::string(test_info->name()).find('/'));
//const std::string full_test_name = std::string(test_info->test_case_name()) + "." + std::string(test_info->name());
const auto& test_params = GetParam(); const auto &test_params = GetParam();
model = test_params.model; model = test_params.model;
model_name = test_params.model_name; model_name = test_params.model_name;
device = test_params.device; device = test_params.device;
@ -33,13 +32,13 @@ public:
test_refs.collect_vm_values_for_test(test_name, test_params); test_refs.collect_vm_values_for_test(test_name, test_params);
EXPECT_GT(test_refs.references[VMSIZE], 0) << "Reference value of VmSize is less than 0. Value: " EXPECT_GT(test_refs.references[VMSIZE], 0) << "Reference value of VmSize is less than 0. Value: "
<< test_refs.references[VMSIZE]; << test_refs.references[VMSIZE];
EXPECT_GT(test_refs.references[VMPEAK], 0) << "Reference value of VmPeak is less than 0. Value: " EXPECT_GT(test_refs.references[VMPEAK], 0) << "Reference value of VmPeak is less than 0. Value: "
<< test_refs.references[VMPEAK]; << test_refs.references[VMPEAK];
EXPECT_GT(test_refs.references[VMRSS], 0) << "Reference value of VmRSS is less than 0. Value: " EXPECT_GT(test_refs.references[VMRSS], 0) << "Reference value of VmRSS is less than 0. Value: "
<< test_refs.references[VMRSS]; << test_refs.references[VMRSS];
EXPECT_GT(test_refs.references[VMHWM], 0) << "Reference value of VmHWM is less than 0. Value: " EXPECT_GT(test_refs.references[VMHWM], 0) << "Reference value of VmHWM is less than 0. Value: "
<< test_refs.references[VMHWM]; << test_refs.references[VMHWM];
} }
}; };
@ -48,17 +47,15 @@ TEST_P(MemCheckTestSuite, create_exenetwork) {
log_info("Create ExecutableNetwork from network: \"" << model log_info("Create ExecutableNetwork from network: \"" << model
<< "\" with precision: \"" << precision << "\" with precision: \"" << precision
<< "\" for device: \"" << device << "\""); << "\" for device: \"" << device << "\"");
auto test_pipeline = [&]{ auto test_params = GetParam();
MemCheckPipeline memCheckPipeline; MemCheckPipeline memCheckPipeline;
auto test_pipeline = [&] {
Core ie; auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
ie.GetVersions(device); ie_api_wrapper->load_plugin(device);
CNNNetwork cnnNetwork = ie.ReadNetwork(model); ie_api_wrapper->read_network(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device); ie_api_wrapper->load_network(device);
log_info("Memory consumption after LoadNetwork:"); log_info("Memory consumption after LoadNetwork:");
memCheckPipeline.record_measures(test_name); memCheckPipeline.record_measures(test_name);
log_debug(memCheckPipeline.get_reference_record_for_test(test_name, model_name, precision, device)); log_debug(memCheckPipeline.get_reference_record_for_test(test_name, model_name, precision, device));
return memCheckPipeline.measure(); return memCheckPipeline.measure();
}; };
@ -71,25 +68,16 @@ TEST_P(MemCheckTestSuite, infer_request_inference) {
log_info("Inference of InferRequest from network: \"" << model log_info("Inference of InferRequest from network: \"" << model
<< "\" with precision: \"" << precision << "\" with precision: \"" << precision
<< "\" for device: \"" << device << "\""); << "\" for device: \"" << device << "\"");
auto test_pipeline = [&]{ auto test_params = GetParam();
MemCheckPipeline memCheckPipeline; MemCheckPipeline memCheckPipeline;
auto test_pipeline = [&] {
Core ie; auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
ie.GetVersions(device); ie_api_wrapper->load_plugin(device);
CNNNetwork cnnNetwork = ie.ReadNetwork(model); ie_api_wrapper->read_network(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device); ie_api_wrapper->load_network(device);
InferRequest inferRequest = exeNetwork.CreateInferRequest(); ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
auto batchSize = cnnNetwork.getBatchSize(); ie_api_wrapper->infer();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(inferRequest, inputsInfo, batchSize);
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
log_info("Memory consumption after Inference:"); log_info("Memory consumption after Inference:");
memCheckPipeline.record_measures(test_name); memCheckPipeline.record_measures(test_name);
@@ -103,11 +91,12 @@ TEST_P(MemCheckTestSuite, infer_request_inference) {
// tests_pipelines/tests_pipelines.cpp

INSTANTIATE_TEST_SUITE_P(MemCheckTests, MemCheckTestSuite,
                         ::testing::ValuesIn(
                                 generateTestsParams({"devices", "models"})),
                         getTestCaseName);

TEST_P(MemCheckTestSuite, inference_with_streams) {
+    auto test_params = GetParam();
    const auto nstreams = 2;
    log_info("Inference of InferRequest from network: \"" << model
             << "\" with precision: \"" << precision
@@ -118,37 +107,23 @@ TEST_P(MemCheckTestSuite, inference_with_streams) {
    auto test_pipeline = [&] {
        MemCheckPipeline memCheckPipeline;
-        std::map<std::string, std::string> config;
-        const std::string key = device + "_THROUGHPUT_STREAMS";
-        config[device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams);
-        Core ie;
-        ie.GetVersions(device);
-        ie.SetConfig(config, device);
-        InferRequest inferRequest;
-        CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
-        auto batchSize = cnnNetwork.getBatchSize();
-        batchSize = batchSize != 0 ? batchSize : 1;
-        const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
        unsigned int nireq = nstreams;
+        auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
+        ie_api_wrapper->load_plugin(device);
+        ie_api_wrapper->set_config(device, "THROUGHPUT_STREAMS", nstreams);
+        ie_api_wrapper->read_network(model);
+        ie_api_wrapper->load_network(device);
        try {
-            nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
+            nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
        } catch (const std::exception &ex) {
            log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
        }
        for (int counter = 0; counter < nireq; counter++) {
-            inferRequest = exeNetwork.CreateInferRequest();
-            fillBlobs(inferRequest, inputsInfo, batchSize);
-            inferRequest.Infer();
-            OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-            for (auto &output : output_info)
-                Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
+            ie_api_wrapper->create_infer_request();
+            ie_api_wrapper->prepare_input();
+            ie_api_wrapper->infer();
        }
        log_info("Memory consumption after Inference with streams: \"" << nstreams
@@ -39,7 +39,7 @@ std::array<long, MeasureValueMax> MemCheckPipeline::measure() {
    return measures;
}

-void MemCheckPipeline::record_measures(const std::string & id) {
+void MemCheckPipeline::record_measures(const std::string &id) {
    std::array<long, MeasureValueMax> measures = measure();
    log_debug("[ MEASURE ] " << MEMCHECK_DELIMITER << id);
    log_info(util::get_measure_values_headers(MEMCHECK_DELIMITER));
@@ -62,7 +62,7 @@ std::string MemCheckPipeline::get_reference_record_for_test(std::string test_nam
    return ss.str();
}

-TestResult common_test_pipeline(const std::function<std::array<long, MeasureValueMax>()>& test_pipeline,
+TestResult common_test_pipeline(const std::function<std::array<long, MeasureValueMax>()> &test_pipeline,
                                const std::array<long, MeasureValueMax> &references) {
    log_info("Reference values of virtual memory consumption:");
    log_info(util::get_measure_values_headers(MEMCHECK_DELIMITER));
@@ -28,6 +28,7 @@ private:
     * @brief Measures values at the current point of time
     */
    std::array<long, MeasureValueMax> _measure();
+
public:
    /**
     * @brief Constructs MemCheckPipeline object and
@@ -45,7 +46,7 @@
     * @brief Measures values and records aligned measurements using provided identifier
     * provided identifier
     */
-    void record_measures(const std::string & id);
+    void record_measures(const std::string &id);

    /**
     * @brief Prepares string used for fast generation of file with references
@@ -54,6 +55,6 @@ public:
                                                std::string precision, std::string target_device);
};

-TestResult common_test_pipeline(const std::function<std::array<long, MeasureValueMax>()>& test_pipeline,
+TestResult common_test_pipeline(const std::function<std::array<long, MeasureValueMax>()> &test_pipeline,
                                const std::array<long, MeasureValueMax> &references);
// tests_pipelines/tests_pipelines.cpp
@@ -10,21 +10,23 @@
#include <pugixml.hpp>

// Measure values
-enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax };
+enum MeasureValue {
+    VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax
+};
// Measure values headers
-const std::array<std::string, MeasureValueMax> MeasureValueHeader { "VMRSS", "VMHWM", "VMSIZE", "VMPEAK", "THREADS" };
+const std::array<std::string, MeasureValueMax> MeasureValueHeader{"VMRSS", "VMHWM", "VMSIZE", "VMPEAK", "THREADS"};

namespace util {
-    template <typename Type>
-    static std::string get_measure_values_as_str(const std::array<Type, MeasureValueMax> & array,
-                                                 const std::string & delimiter = "\t\t") {
+    template<typename Type>
+    static std::string get_measure_values_as_str(const std::array<Type, MeasureValueMax> &array,
+                                                 const std::string &delimiter = "\t\t") {
        std::string str = std::to_string(*array.begin());
        for (auto it = array.begin() + 1; it != array.end(); it++)
            str += delimiter + std::to_string(*it);
        return str;
    }

-    static std::string get_measure_values_headers(const std::string & delimiter = "\t\t") {
+    static std::string get_measure_values_headers(const std::string &delimiter = "\t\t") {
        std::string str = *MeasureValueHeader.begin();
        for (auto it = MeasureValueHeader.begin() + 1; it != MeasureValueHeader.end(); it++)
            str += delimiter + *it;
@@ -35,16 +37,20 @@ namespace util {
class MemCheckEnvironment {
private:
    pugi::xml_document _refs_config;

    MemCheckEnvironment() = default;
-    MemCheckEnvironment(const MemCheckEnvironment&) = delete;
-    MemCheckEnvironment& operator=(const MemCheckEnvironment&) = delete;
+
+    MemCheckEnvironment(const MemCheckEnvironment &) = delete;
+
+    MemCheckEnvironment &operator=(const MemCheckEnvironment &) = delete;

public:
-    static MemCheckEnvironment& Instance(){
+    static MemCheckEnvironment &Instance() {
        static MemCheckEnvironment env;
        return env;
    }

-    const pugi::xml_document & getRefsConfig() {
+    const pugi::xml_document &getRefsConfig() {
        return _refs_config;
    }
@@ -60,7 +66,7 @@ private:
public:
    std::array<long, MeasureValueMax> references;

-    TestReferences () {
+    TestReferences() {
        std::fill(references.begin(), references.end(), -1);
        // Parse RefsConfig from MemCheckEnvironment
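MemCheckEnvironment is a Meyers singleton holding the parsed references XML, and TestReferences pre-fills its array with -1 before reading it. Below is a hedged sketch of how a test might pull one reference value out of that config; the XML node and attribute names ("models", "model", "name", "vmrss") are assumptions for illustration, not taken from this diff.

#include <pugixml.hpp>
#include <string>

// Sketch: look up a per-model VMRSS reference in the singleton's config.
long vmrss_reference_for(const std::string &model_name) {
    const pugi::xml_document &refs = MemCheckEnvironment::Instance().getRefsConfig();
    for (pugi::xml_node node : refs.child("models").children("model")) {
        if (model_name == node.attribute("name").as_string())
            return static_cast<long>(node.attribute("vmrss").as_llong(-1));
    }
    return -1;  // mirrors the "-1 means no reference" convention above
}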
@@ -3,21 +3,22 @@
//
#include "../common/managers/thread_manager.h"
-#include "../common/tests_utils.h"
-#include "common_utils.h"
+#include "../common/infer_api/infer_api.h"
#include "tests_pipelines/tests_pipelines.h"
-#include <inference_engine.hpp>

#include <gtest/gtest.h>

using namespace InferenceEngine;

-class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam<MemLeaksTestCase> {};
+class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam<MemLeaksTestCase> {
+};

-class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam<MemLeaksTestCase> {};
+class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam<MemLeaksTestCase> {
+};

-class MemLeaksTestSuite : public ::testing::TestWithParam<MemLeaksTestCase> {};
+class MemLeaksTestSuite : public ::testing::TestWithParam<MemLeaksTestCase> {
+};

inline void test_runner(int numthreads, const std::function<TestResult()> &test_function) {
    ThreadManager<TestResult> thr_manager;
@@ -29,7 +30,7 @@ inline void test_runner(int numthreads, const std::function<TestResult()> &test_
    for (int i = 0; i < numthreads; i++) {
        EXPECT_EQ(statuses[i], ManagerStatus::FINISHED_SUCCESSFULLY)
                            << "[Thread " << i << "] Thread not finished successfully";
        EXPECT_EQ(results[i].first, TestStatus::TEST_OK) << "[Thread " << i << "] " << results[i].second;
    }
}
@@ -38,7 +39,7 @@ inline void test_runner(int numthreads, const std::function<TestResult()> &test_
TEST_P(MemLeaksTestSuiteNoModel, load_unload_plugin) {
    auto test_params = GetParam();
-    std::vector<std::function<void()>> pipeline = {load_unload_plugin(test_params.device)};
+    std::vector<std::function<void()>> pipeline = {load_unload_plugin(test_params.device, test_params.api_version)};
    auto test = [&] {
        log_info("Load/unload plugin for \"" << test_params.device << "\" device"
                 << " for " << test_params.numiters << " times");
@@ -51,8 +52,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, read_network) {
    auto test_params = GetParam();
    std::vector<std::function<void()>> pipeline;
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"]));
+        pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"], test_params.api_version));
    }
    auto test = [&] {
        log_info("Read networks: " << test_params.model_name << " for " << test_params.numiters << " times");
@@ -65,8 +67,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, cnnnetwork_reshape_batch_x2) {
    auto test_params = GetParam();
    std::vector<std::function<void()>> pipeline;
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"]));
+        pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"], i, test_params.api_version));
    }
    auto test = [&] {
        log_info("Reshape to batch*=2 of CNNNetworks created from networks: " << test_params.model_name << " for "
@@ -80,8 +83,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) {
    auto test_params = GetParam();
    std::vector<std::function<void()>> pipeline;
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(set_input_params(test_params.models[i]["full_path"]));
+        pipeline.push_back(set_input_params(test_params.models[i]["full_path"], test_params.api_version));
    }
    auto test = [&] {
        log_info("Apply preprocessing for CNNNetworks from networks: " << test_params.model_name << " for "
@@ -93,16 +97,18 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) {
TEST_P(MemLeaksTestSuite, recreate_exenetwork) {
    auto test_params = GetParam();
-    Core ie;
    std::vector<std::function<void()>> pipeline;
+    auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(recreate_exenetwork(ie, test_params.models[i]["full_path"], test_params.device));
+        pipeline.push_back(recreate_compiled_model(ie_wrapper, test_params.models[i]["full_path"], test_params.device,
+                                                   test_params.api_version));
    }
    auto test = [&] {
        log_info("Recreate ExecutableNetworks within existing InferenceEngine::Core from networks: "
                 << test_params.model_name << " for \"" << test_params.device << "\" device for "
                 << test_params.numiters << " times");
        return common_test_pipeline(pipeline, test_params.numiters);
    };
    test_runner(test_params.numthreads, test);
@@ -110,19 +116,17 @@ TEST_P(MemLeaksTestSuite, recreate_exenetwork) {
TEST_P(MemLeaksTestSuite, recreate_infer_request) {
    auto test_params = GetParam();
-    Core ie;
    std::vector<std::function<void()>> pipeline;
-    std::vector<ExecutableNetwork> exeNetworks;
-    int n_models = test_params.models.size();
-    exeNetworks.reserve(n_models);
+    auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
+    size_t n_models = test_params.models.size();
    for (int i = 0; i < n_models; i++) {
-        CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
-        exeNetworks.push_back(exeNetwork);
-        pipeline.push_back(recreate_infer_request(exeNetworks[i]));
+        ie_wrapper->read_network(test_params.models[i]["full_path"]);
+        ie_wrapper->load_network(test_params.device);
+        pipeline.push_back(recreate_infer_request(ie_wrapper));
    }
    auto test = [&] {
        log_info("Create InferRequests from networks: " << test_params.model_name << " for \"" << test_params.device
                 << "\" device for " << test_params.numiters << " times");
@@ -133,28 +137,18 @@ TEST_P(MemLeaksTestSuite, recreate_infer_request) {
TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
    auto test_params = GetParam();
-    Core ie;
    std::vector<std::function<void()>> pipeline;
-    std::vector<InferRequest> infer_requests;
-    std::vector<OutputsDataMap> outputs_info;
-    int n_models = test_params.models.size();
-    infer_requests.reserve(n_models);
-    outputs_info.reserve(n_models);
+    auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
+    size_t n_models = test_params.models.size();
    for (int i = 0; i < n_models; i++) {
-        CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]);
-        ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
-        InferRequest infer_request = exeNetwork.CreateInferRequest();
-        infer_requests.push_back(infer_request);
-        OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-        outputs_info.push_back(output_info);
-        auto batchSize = cnnNetwork.getBatchSize();
-        batchSize = batchSize != 0 ? batchSize : 1;
-        const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-        fillBlobs(infer_requests[i], inputsInfo, batchSize);
-        pipeline.push_back(reinfer_request_inference(infer_requests[i], outputs_info[i]));
+        ie_wrapper->read_network(test_params.models[i]["full_path"]);
+        ie_wrapper->load_network(test_params.device);
+        ie_wrapper->create_infer_request();
+        ie_wrapper->prepare_input();
+        pipeline.push_back(reinfer_request_inference(ie_wrapper));
    }
    auto test = [&] {
        log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""
                 << test_params.device << "\" device for "
@@ -167,8 +161,10 @@ TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
TEST_P(MemLeaksTestSuite, infer_request_inference) {
    auto test_params = GetParam();
    std::vector<std::function<void()>> pipeline;
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device));
+        pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device,
+                                                   test_params.api_version));
    }
    auto test = [&] {
        log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""
@@ -183,8 +179,10 @@ TEST_P(MemLeaksTestSuite, inference_with_streams) {
    auto test_params = GetParam();
    const auto nstreams = 2;
    std::vector<std::function<void()>> pipeline;
+    pipeline.reserve(test_params.models.size());
    for (int i = 0; i < test_params.models.size(); i++) {
-        pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams));
+        pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams,
+                                                  test_params.api_version));
    }
    auto test = [&] {
        log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""
@@ -28,7 +28,9 @@ using namespace InferenceEngine;
#define THRESHOLD 0.1

// Measure values
-enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax };
+enum MeasureValue {
+    VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax
+};

namespace util {
    template<typename In, typename Out, typename Func>
@@ -42,7 +44,7 @@ namespace util {
    }
}// namespace util

-TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline, const int &n) {
+TestResult common_test_pipeline(const std::vector<std::function<void()>> &test_pipeline, const int &n) {
    if (AVERAGE_NUM > n)
        return TestResult(TestStatus::TEST_FAILED, "Test failed: number of iterations less than defined AVERAGE_NUM");
@@ -65,7 +67,7 @@ TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline
    for (size_t iteration = 1, measure_count = n / AVERAGE_NUM;; iteration++) {
        // run test pipeline and collect metrics
-        for (auto step : test_pipeline) step();
+        for (const auto &step: test_pipeline) step();
        getVmValues(cur[VMSIZE], cur[VMPEAK], cur[VMRSS], cur[VMHWM]);
        cur[THREADS] = getThreadsNum();
@@ -103,8 +105,8 @@ TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline
            // threshold = THRESHOLD * ref
            util::transform(ref, threshold, [](long ref_val) -> float { return THRESHOLD * ref_val; });
            log_info("Setting thresholds:"
                     << " VMRSS=" << ref[VMRSS] << "(+-" << static_cast<int>(threshold[VMRSS]) << "),"
                     << " VMHWM=" << ref[VMHWM] << "(+-" << static_cast<int>(threshold[VMHWM]) << ")");
        } else if (measure_count <= 0) {
            // exit from main loop
            break;
@@ -13,5 +13,5 @@
#include <inference_engine.hpp>

// tests_pipelines/tests_pipelines.cpp
-TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline, const int &n);
+TestResult common_test_pipeline(const std::vector<std::function<void()>> &test_pipeline, const int &n);
// tests_pipelines/tests_pipelines.cpp
@@ -129,6 +129,9 @@ def main():
    parser.add_argument('--comparison_report',
                        required=args.compare,
                        help='create comparison report file name')
+    parser.add_argument('--gtest_filter',
+                        default='',
+                        help='gtest filter passed to gtest')
    args = parser.parse_args()
@@ -150,6 +153,7 @@ def main():
                '--output_dir', f'{args.output_dir}',
                '--workers', f'{args.workers}',
                '--timeout', f'{args.timeout}',
+                '--gtest_filter', f'{args.gtest_filter}',
                args.binary, '--'] + binary_args)

    if args.upload or args.timeline_report or args.compare:
@@ -33,8 +33,8 @@ TEST_P(UnitTestSuiteNoDevice, set_input_params) {
    runTest(test_set_input_params, GetParam());
}

-TEST_P(UnitTestSuite, create_exenetwork) {
-    runTest(test_create_exenetwork, GetParam());
+TEST_P(UnitTestSuite, create_compiled_model) {
+    runTest(test_create_compiled_model, GetParam());
}

TEST_P(UnitTestSuite, create_infer_request) {
@@ -78,13 +78,16 @@ TEST_P(UnitTestSuite, infer_request_inference_full_pipeline) {
// tests_pipelines/tests_pipelines_full_pipeline.cpp

INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoModel,
-                        ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices"})),
-                        getTestCaseName);
+                        ::testing::ValuesIn(generateTestsParams(
+                                {"processes", "threads", "iterations", "devices", "api_versions"})),
+                        getTestCaseName);

INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoDevice,
-                        ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "models"})),
-                        getTestCaseName);
+                        ::testing::ValuesIn(generateTestsParams(
+                                {"processes", "threads", "iterations", "models", "api_versions"})),
+                        getTestCaseName);

INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuite,
-                        ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices", "models"})),
-                        getTestCaseName);
+                        ::testing::ValuesIn(generateTestsParams(
+                                {"processes", "threads", "iterations", "devices", "models", "api_versions"})),
+                        getTestCaseName);
@@ -11,75 +11,84 @@
using namespace InferenceEngine;

-void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n) {
+void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n,
+                             const int &api_version) {
    log_info("Load/unload plugin for device: " << target_device << " for " << n << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        load_unload_plugin(target_device)();
+        load_unload_plugin(target_device, api_version)();
    }
}

-void test_read_network(const std::string &model, const std::string &target_device, const int &n) {
+void test_read_network(const std::string &model, const std::string &target_device, const int &n, const int &api_version) {
    log_info("Read network: \"" << model << "\" for " << n << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        read_cnnnetwork(model)();
+        read_cnnnetwork(model, api_version)();
    }
}

-void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n) {
+void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n,
+                                      const int &api_version) {
    log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        cnnnetwork_reshape_batch_x2(model)();
+        cnnnetwork_reshape_batch_x2(model, i, api_version)();
    }
}

-void test_set_input_params(const std::string &model, const std::string &target_device, const int &n) {
+void test_set_input_params(const std::string &model, const std::string &target_device, const int &n,
+                           const int &api_version) {
    log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        set_input_params(model)();
+        set_input_params(model, api_version)();
    }
}

-void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n) {
-    log_info("Create ExecutableNetwork from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
+void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n,
+                                const int &api_version) {
+    log_info("Create ExecutableNetwork from network: \"" << model
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        create_exenetwork(model, target_device)();
+        create_compiled_model(model, target_device, api_version)();
    }
}

-void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n) {
-    log_info("Create InferRequest from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
+void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n,
+                               const int &api_version) {
+    log_info("Create InferRequest from network: \"" << model
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        create_infer_request(model, target_device)();
+        create_infer_request(model, target_device, api_version)();
    }
}

-void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n) {
-    log_info("Inference of InferRequest from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
+void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n,
+                                  const int &api_version) {
+    log_info("Inference of InferRequest from network: \"" << model
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        infer_request_inference(model, target_device)();
+        infer_request_inference(model, target_device, api_version)();
    }
}
@@ -10,21 +10,47 @@
#include <string>

// tests_pipelines/tests_pipelines.cpp
-void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n);
-void test_read_network(const std::string &model, const std::string &target_device, const int &n);
-void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n);
-void test_set_input_params(const std::string &model, const std::string &target_device, const int &n);
-void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n);
-void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n);
-void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n);
+void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n,
+                             const int &api_version);
+
+void test_read_network(const std::string &model, const std::string &target_device, const int &n,
+                       const int &api_version);
+
+void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n,
+                                      const int &api_version);
+
+void test_set_input_params(const std::string &model, const std::string &target_device, const int &n,
+                           const int &api_version);
+
+void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n,
+                                const int &api_version);
+
+void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n,
+                               const int &api_version);
+
+void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n,
+                                  const int &api_version);
// tests_pipelines/tests_pipelines.cpp

// tests_pipelines/tests_pipelines_full_pipeline.cpp
-void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
-void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
+void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                           const int &api_version);
+
+void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                     const int &api_version);
+
+void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                         const int &api_version);
+
+void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device,
+                                                    const int &n, const int &api_version);
+
+void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                          const int &api_version);
+
+void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                             const int &api_version);
+
+void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device,
+                                                const int &n, const int &api_version);
// tests_pipelines/tests_pipelines_full_pipeline.cpp
@@ -3,13 +3,10 @@
//
#include "tests_pipelines.h"
-#include "common_utils.h"

#include <string>
+#include <openvino/core/preprocess/pre_post_process.hpp>
-#include <inference_engine.hpp>
-
-using namespace InferenceEngine;

#define batchIndex 0
@@ -34,247 +31,126 @@
    else \
        throw std::logic_error("Reshape wasn't applied for a model.");

-void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                           const int &api_version) {
    log_info("Load/unload plugin for device: " << target_device << " for " << n << " times");
-    Core ie;
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        // GetVersions silently register plugin in `plugins` through `GetCPPPluginByName`
-        ie.GetVersions(target_device);
+        // get_versions silently register plugin in `plugins` through `GetCPPPluginByName`
+        ie_api_wrapper->load_plugin(target_device);
        // Remove plugin for target_device from `plugins`
-        ie.UnregisterPlugin(target_device);
+        ie_api_wrapper->unload_plugin(target_device);
    }
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        setInputParameters();
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->read_network(model);
+    ie_api_wrapper->load_network(target_device);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
+    ie_api_wrapper->infer();
}
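For reference, the API 2.0 side of the load/unload cycle above is small. A sketch, assuming ov::Core and a placeholder device name; the test itself only ever goes through the wrapper.

#include <openvino/openvino.hpp>
#include <string>

// Sketch: get_versions() loads the plugin as a side effect, unload_plugin() drops it again.
void cycle_plugin_ov2(const std::string &device) {
    ov::Core core;
    core.get_versions(device);   // implicitly registers/loads the plugin
    core.unload_plugin(device);  // removes it from the core's plugin map
}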
-void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                     const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Read network: \"" << model << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork;
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        cnnNetwork = ie.ReadNetwork(model);
+        ie_api_wrapper->read_network(model);
    }
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        setInputParameters();
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->load_network(target_device);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
+    ie_api_wrapper->infer();
}
-void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                         const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        for (auto &input : inputInfo) {
-            setInputParameters();
-        }
+        ie_api_wrapper->set_input_params(model);
    }
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->load_network(target_device);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
+    ie_api_wrapper->infer();
}
-void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device,
+                                                    const int &n, const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    for (auto &input : inputInfo) {
-        setInputParameters();
-    }
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    int prev_batch = -1, new_batch;
-    for (auto &input : inputInfo) {
-        auto layout = input.second->getTensorDesc().getLayout();
-        if ((layout == Layout::NCHW) || (layout == Layout::NC))
-            prev_batch = shapes[input.first][batchIndex];
-    }
-    if (prev_batch == -1)
-        throw std::logic_error("Reshape wasn't applied for a model.");
+    ie_api_wrapper->read_network(model);
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        new_batch = ((i % 2) == 0) ? prev_batch * 2 : prev_batch;
-        for (auto &input : inputInfo) {
-            auto layout = input.second->getTensorDesc().getLayout();
-            if ((layout == Layout::NCHW) || (layout == Layout::NC)) {
-                shapes[input.first][batchIndex] = new_batch;
-                doReshape = true;
-            }
-        }
-        reshapeCNNNetwork();
+        ie_api_wrapper->change_batch_size(2, i);
    }
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->load_network(target_device);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
+    ie_api_wrapper->infer();
}
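The old loop alternated the batch between its original value and twice that value; change_batch_size(2, i) presumably mirrors that behaviour inside the wrapper. In API 2.0 terms, a batch*=2 reshape can be expressed on the ov::Model roughly as below. This is a sketch that assumes the batch is dimension 0 of every static-ranked input; the wrapper's own method is what the test actually calls.

#include <openvino/openvino.hpp>
#include <map>
#include <memory>

// Sketch: double the batch dimension of every input and reshape the model.
void double_batch(const std::shared_ptr<ov::Model> &model) {
    std::map<ov::Output<ov::Node>, ov::PartialShape> new_shapes;
    for (const auto &input : model->inputs()) {
        ov::PartialShape shape = input.get_partial_shape();
        if (shape.rank().is_static() && shape[0].is_static())
            shape[0] = shape[0].get_length() * 2;  // batchIndex == 0
        new_shapes[input] = shape;
    }
    model->reshape(new_shapes);
}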
-void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                          const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Create ExecutableNetwork from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        setInputParameters();
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork;
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
+    ie_api_wrapper->read_network(model);
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
+        ie_api_wrapper->load_network(target_device);
    }
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
+    ie_api_wrapper->infer();
}
-void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
+                                             const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Create InferRequest from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        setInputParameters();
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request;
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
+    ie_api_wrapper->read_network(model);
+    ie_api_wrapper->load_network(target_device);
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        infer_request = exeNetwork.CreateInferRequest();
-        fillBlobs(infer_request, inputsInfo, batchSize);
+        ie_api_wrapper->create_infer_request();
+        ie_api_wrapper->prepare_input();
    }
-    infer_request.Infer();
-    OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-    for (auto &output : output_info)
-        Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+    ie_api_wrapper->infer();
}
-void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
+void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device,
+                                                const int &n, const int &api_version) {
+    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    log_info("Inference of InferRequest from network: \"" << model
-             << "\" for device: \"" << target_device << "\" for " << n << " times");
-    Core ie;
-    CNNNetwork cnnNetwork = ie.ReadNetwork(model);
-    InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
-    ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
-    bool doReshape = false;
-    for (auto &input : inputInfo) {
-        setInputParameters();
-        computeShapesToReshape();
-    }
-    reshapeCNNNetwork();
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
-    InferRequest infer_request = exeNetwork.CreateInferRequest();
-    auto batchSize = cnnNetwork.getBatchSize();
-    batchSize = batchSize != 0 ? batchSize : 1;
-    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
-    fillBlobs(infer_request, inputsInfo, batchSize);
+             << "\" for device: \"" << target_device << "\" for " << n
+             << " times");
+    ie_api_wrapper->read_network(model);
+    ie_api_wrapper->load_network(target_device);
+    ie_api_wrapper->create_infer_request();
+    ie_api_wrapper->prepare_input();
    for (int i = 0; i < n; i++) {
        if (i == n / 2) {
            log_info("Half of the test have already passed");
        }
-        infer_request.Infer();
-        OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
-        for (auto &output : output_info)
-            Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
+        ie_api_wrapper->infer();
    }
}