Moved stress tests to OV API 2.0 (#9791)

* Moved stress tests to OV API 2.0

* Fix for using output index instead of get_index

* Removed ov::runtime namespace in stress tests

* Updated stress tests according to latest changes in OV 2.0

* Fix memleaks tests

* Updated run_memcheck.py to process gtest_filter

* Updated fillTensors, added InferAPI1 and InferAPI2 classes

* Updated test_inference_with_streams

* Updated isImage, comments

* Updated fillTensors to fill image_info inputs with positive pseudo-random numbers

* Removed redundant variable in fillTensors
Lidia Toropova 2022-01-31 17:07:25 +03:00 committed by GitHub
parent 252b7d4728
commit 2f0bcc8542
27 changed files with 901 additions and 622 deletions


@ -5,8 +5,8 @@
#include "common_utils.h"
#include <inference_engine.hpp>
#include <openvino/openvino.hpp>
using namespace InferenceEngine;
/**
* @brief Fill InferRequest blobs with random values or image information
@ -15,13 +15,13 @@ void fillBlobs(InferenceEngine::InferRequest inferRequest,
const InferenceEngine::ConstInputsDataMap &inputsInfo,
const size_t &batchSize) {
std::vector<std::pair<size_t, size_t>> input_image_sizes;
for (const ConstInputsDataMap::value_type& item : inputsInfo) {
for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) {
if (isImage(item.second))
input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
}
for (const ConstInputsDataMap::value_type& item : inputsInfo) {
Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) {
InferenceEngine::Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
// Fill image information
auto image_size = input_image_sizes.at(0);
@ -31,6 +31,8 @@ void fillBlobs(InferenceEngine::InferRequest inferRequest,
fillBlobImInfo<short>(inputBlob, batchSize, image_size);
} else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
} else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
fillBlobImInfo<uint8_t>(inputBlob, batchSize, image_size);
} else {
throw std::logic_error("Input precision is not supported for image info!");
}
@ -56,3 +58,31 @@ void fillBlobs(InferenceEngine::InferRequest inferRequest,
}
}
}
/**
* @brief Fill infer_request tensors with random values or image information
*/
void fillTensors(ov::InferRequest &infer_request, const std::vector<ov::Output<ov::Node>> &inputs) {
for (size_t i = 0; i < inputs.size(); ++i) {
ov::Tensor input_tensor;
if (inputs[i].get_element_type() == ov::element::f32) {
input_tensor = fillTensorRandom<float>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::f16) {
input_tensor = fillTensorRandom<short>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::i32) {
input_tensor = fillTensorRandom<int32_t>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::u8) {
input_tensor = fillTensorRandom<uint8_t>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::i8) {
input_tensor = fillTensorRandom<int8_t>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::u16) {
input_tensor = fillTensorRandom<uint16_t>(inputs[i]);
} else if (inputs[i].get_element_type() == ov::element::i16) {
input_tensor = fillTensorRandom<int16_t>(inputs[i]);
} else {
throw std::logic_error(
"Input precision is not supported for " + inputs[i].get_element_type().get_type_name());
}
infer_request.set_input_tensor(i, input_tensor);
}
}
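
For orientation, a minimal usage sketch of the new fillTensors helper through the OV 2.0 API; the model path and device name below are placeholders, not part of this change:

#include <openvino/openvino.hpp>
#include "common_utils.h"

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");            // placeholder model path
    auto compiled = core.compile_model(model, "CPU");      // placeholder device
    auto request = compiled.create_infer_request();
    // fill every input with pseudo-random data before inference
    fillTensors(request, model->inputs());
    request.infer();
    return 0;
}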


@ -5,11 +5,11 @@
#pragma once
#include <inference_engine.hpp>
#include <limits>
using namespace InferenceEngine;
/**
* @brief Determine if InferenceEngine blob means image or not
* @brief Determine if InferenceEngine blob means image or not (OV API 1.0)
*/
template<typename T>
static bool isImage(const T &blob) {
@ -23,7 +23,7 @@ static bool isImage(const T &blob) {
/**
* @brief Determine if InferenceEngine blob means image information or not
* @brief Determine if InferenceEngine blob means image information or not (OV API 1.0)
*/
template<typename T>
static bool isImageInfo(const T &blob) {
@ -37,7 +37,7 @@ static bool isImageInfo(const T &blob) {
/**
* @brief Return height and width from provided InferenceEngine tensor description
* @brief Return height and width from provided InferenceEngine tensor description (OV API 1)
*/
inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc &desc) {
const auto &layout = desc.getLayout();
@ -61,12 +61,13 @@ inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::Ten
}
}
/**
* @brief Fill InferenceEngine blob with random values
*/
template<typename T>
void fillBlobRandom(Blob::Ptr& inputBlob) {
MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
void fillBlobRandom(InferenceEngine::Blob::Ptr &inputBlob) {
auto minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(inputBlob);
// locked memory holder should be alive all time while access to its buffer happens
auto minputHolder = minput->wmap();
@ -77,15 +78,29 @@ void fillBlobRandom(Blob::Ptr& inputBlob) {
}
}
/**
* @brief Fill InferenceEngine blob with image information
* @brief Fill InferenceEngine tensor with random values (OV API 2.0)
*/
template<typename T>
void fillBlobImInfo(Blob::Ptr& inputBlob,
ov::Tensor fillTensorRandom(const ov::Output<ov::Node> &input) {
ov::Tensor tensor{input.get_element_type(), input.get_shape()};
std::vector<T> values(ov::shape_size(input.get_shape()));
for (size_t i = 0; i < values.size(); ++i) {
values[i] = 1 + static_cast <T> (rand()) / (static_cast <T> (RAND_MAX / (std::numeric_limits<T>::max() - 1)));
}
std::memcpy(tensor.data(), values.data(), sizeof(T) * values.size());
return tensor;
}
/**
* @brief Fill InferenceEngine blob with image information (OV API 1.0)
*/
template<typename T>
void fillBlobImInfo(InferenceEngine::Blob::Ptr &inputBlob,
const size_t &batchSize,
std::pair<size_t, size_t> image_size) {
MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as<InferenceEngine::MemoryBlob>(inputBlob);
// locked memory holder should be alive all time while access to its buffer happens
auto minputHolder = minput->wmap();
@ -111,3 +126,9 @@ void fillBlobImInfo(Blob::Ptr& inputBlob,
void fillBlobs(InferenceEngine::InferRequest inferRequest,
const InferenceEngine::ConstInputsDataMap &inputsInfo,
const size_t &batchSize);
/**
* @brief Fill InferRequest tensors with random values or image information
*/
void fillTensors(ov::InferRequest &infer_request,
const std::vector<ov::Output<ov::Node>> &inputs);
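
For comparison, a minimal sketch of the legacy fillBlobs path through OV API 1.0, mirroring what InferAPI1::prepare_input does later in this change; the model path and device are again placeholders:

#include <inference_engine.hpp>
#include "common_utils.h"

int main() {
    InferenceEngine::Core ie;
    auto network = ie.ReadNetwork("model.xml");             // placeholder model path
    auto exe_network = ie.LoadNetwork(network, "CPU");      // placeholder device
    auto request = exe_network.CreateInferRequest();
    size_t batch = network.getBatchSize();
    // fill all input blobs with random values (or image info) before inference
    fillBlobs(request, exe_network.GetInputsInfo(), batch != 0 ? batch : 1);
    request.Infer();
    return 0;
}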


@ -3,6 +3,10 @@
<value>CPU</value>
<value>GPU</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<!--Models with FP32 precision-->
<model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />


@ -2,6 +2,10 @@
<devices>
<value>MYRIAD</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<!--Models with FP32 precision-->
<model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />


@ -3,6 +3,10 @@
<value>CPU</value>
<value>GPU</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<!--Models with FP32 precision-->
<model name="vgg16" precision="FP32" source="omz" />


@ -3,6 +3,10 @@
<value>CPU</value>
<value>GPU</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<!--Models with FP32 precision-->
<model name="mobilenet-v2-1.4-224" precision="FP32" source="omz" />


@ -14,6 +14,10 @@
<value>CPU</value>
<value>GPU</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<model name="alexnet" precision="FP32" source="omz" />
<model name="mobilenet-ssd" precision="FP32" source="omz" />


@ -13,6 +13,10 @@
<devices>
<value>MYRIAD</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<model name="alexnet" precision="FP32" source="omz" />
<model name="mobilenet-ssd" precision="FP32" source="omz" />


@ -15,6 +15,10 @@
<value>CPU</value>
<value>GPU</value>
</devices>
<api_versions>
<value>1</value>
<value>2</value>
</api_versions>
<models>
<model name="alexnet" precision="FP32" source="omz" />
<model name="mobilenet-ssd" precision="FP32" source="omz" />


@ -10,162 +10,120 @@
#include <string>
#include <inference_engine.hpp>
#include <openvino/openvino.hpp>
using namespace InferenceEngine;
std::function<void()> load_unload_plugin(const std::string &target_device) {
std::function<void()> load_unload_plugin(const std::string &target_device, const int &api_version) {
return [&] {
Core ie;
// GetVersions silently register plugin in `plugins` through `GetCPPPluginByName`
ie.GetVersions(target_device);
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
// get_versions silently register plugin in `plugins` through `GetCPPPluginByName`
ie_api_wrapper->load_plugin(target_device);
// Remove plugin for target_device from `plugins`
ie.UnregisterPlugin(target_device);
ie_api_wrapper->unload_plugin(target_device);
};
}
std::function<void()> read_cnnnetwork(const std::string &model) {
std::function<void()> read_cnnnetwork(const std::string &model, const int &api_version) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
};
}
std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model) {
std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
const InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (const InputsDataMap::value_type& input : inputInfo) {
int batchIndex = -1;
auto layout = input.second->getTensorDesc().getLayout();
if ((layout == Layout::NCHW) || (layout == Layout::NCDHW) ||
(layout == Layout::NHWC) || (layout == Layout::NDHWC) ||
(layout == Layout::NC)) {
batchIndex = 0;
} else if (layout == CN) {
batchIndex = 1;
}
if (batchIndex != -1) {
shapes[input.first][batchIndex] *= 2;
doReshape = true;
}
}
if (doReshape)
cnnNetwork.reshape(shapes);
else
throw std::logic_error("Reshape wasn't applied for a model.");
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
ie_api_wrapper->change_batch_size(2, iter);
};
}
std::function<void()> set_input_params(const std::string &model) {
std::function<void()> set_input_params(const std::string &model, const int &api_version) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
for (auto &input : inputInfo) {
input.second->getPreProcess().setResizeAlgorithm(NO_RESIZE);
input.second->setPrecision(Precision::U8);
if (input.second->getInputData()->getTensorDesc().getDims().size() == 4)
input.second->setLayout(Layout::NCHW);
else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2)
input.second->setLayout(Layout::NC);
else
throw std::logic_error("Setting of input parameters wasn't applied for a model.");
}
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
ie_api_wrapper->set_input_params(model);
};
}
std::function<void()> create_exenetwork(const std::string &model, const std::string &target_device) {
std::function<void()>
create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
};
}
std::function<void()> recreate_exenetwork(Core &ie, const std::string &model, const std::string &target_device) {
std::function<void()> recreate_compiled_model(std::shared_ptr<InferApiBase> &ie_wrapper, const std::string &model,
const std::string &target_device, const int &api_version) {
return [&] {
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
};
}
std::function<void()> create_infer_request(const std::string &model, const std::string &target_device) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
ie_wrapper->load_plugin(target_device);
ie_wrapper->read_network(model);
ie_wrapper->load_network(target_device);
};
}
std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork) {
std::function<void()>
create_infer_request(const std::string &model, const std::string &target_device, const int &api_version) {
return [&] {
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
};
}
std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device) {
std::function<void()> recreate_infer_request(std::shared_ptr<InferApiBase> &ie_wrapper) {
return [&] {
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_wrapper->create_infer_request();
};
}
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) {
std::function<void()>
infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version) {
return [&] {
infer_request.Infer();
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
};
}
std::function<void()> inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams) {
std::function<void()> reinfer_request_inference(std::shared_ptr<InferApiBase> &ie_wrapper) {
return [&] {
std::map<std::string, std::string> config;
config[target_device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams);
ie_wrapper->infer();
};
}
Core ie;
ie.GetVersions(target_device);
ie.SetConfig(config, target_device);
InferRequest inferRequest;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
std::function<void()>
inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams,
const int &api_version) {
return [&] {
unsigned int nireq = nstreams;
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
ie_api_wrapper->load_plugin(target_device);
ie_api_wrapper->set_config(target_device, "THROUGHPUT_STREAMS", nstreams);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
try {
nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
} catch (const std::exception &ex) {
log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
}
for (int counter = 0; counter < nireq; counter++) {
inferRequest = exeNetwork.CreateInferRequest();
fillBlobs(inferRequest, inputsInfo, batchSize);
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
for (int counter = 0; counter < nireq; counter++) {
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
};
}
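
Each factory above returns a std::function<void()> step; the memleaks suites below push several such steps into a pipeline and replay them. A rough sketch of that pattern, assuming a placeholder model path, device, and iteration count (the header that declares these factories is not named in this diff):

#include <functional>
#include <string>
#include <vector>
// plus the pipelines header declaring these factories (its name is not shown here)

int main() {
    const std::string model = "model.xml";   // placeholder path
    const std::string device = "CPU";        // placeholder device
    const int api_version = 2;               // 1 = OV API 1.0, 2 = OV API 2.0
    std::vector<std::function<void()>> pipeline;
    pipeline.push_back(read_cnnnetwork(model, api_version));
    pipeline.push_back(create_compiled_model(model, device, api_version));
    pipeline.push_back(infer_request_inference(model, device, api_version));
    // the real runners wrap this loop with memory measurements and config-driven iteration counts
    for (int i = 0; i < 10; ++i)
        for (const auto &step : pipeline) step();
    return 0;
}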


@ -5,15 +5,33 @@
#include <string>
#include <functional>
#include <inference_engine.hpp>
#include "../infer_api/infer_api.h"
std::function<void()> load_unload_plugin(const std::string &target_device);
std::function<void()> read_cnnnetwork(const std::string &model);
std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model);
std::function<void()> set_input_params(const std::string &model);
std::function<void()> create_exenetwork(const std::string &model, const std::string &target_device);
std::function<void()> recreate_exenetwork(InferenceEngine::Core &ie, const std::string &model, const std::string &target_device);
std::function<void()> create_infer_request(const std::string &model, const std::string &target_device);
std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork);
std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info);
std::function<void()> inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams);
std::function<void()> load_unload_plugin(const std::string &target_device, const int &api_version);
std::function<void()> read_cnnnetwork(const std::string &model, const int &api_version);
std::function<void()> cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version);
std::function<void()> set_input_params(const std::string &model, const int &api_version);
std::function<void()>
create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version);
std::function<void()>
create_infer_request(const std::string &model, const std::string &target_device, const int &api_version);
std::function<void()>
infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version);
std::function<void()>
inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams,
const int &api_version);
std::function<void()>
recreate_compiled_model(std::shared_ptr<InferApiBase> &ie, const std::string &model, const std::string &target_device,
const int &api_version);
std::function<void()> recreate_infer_request(std::shared_ptr<InferApiBase> &ie_wrapper);
std::function<void()> reinfer_request_inference(std::shared_ptr<InferApiBase> &ie_wrapper);


@ -0,0 +1,188 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "infer_api.h"
#include "openvino/core/preprocess/pre_post_process.hpp"
InferAPI1::InferAPI1() = default;
void InferAPI1::load_plugin(const std::string &device) {
ie.GetVersions(device);
}
void InferAPI1::unload_plugin(const std::string &device) {
ie.UnregisterPlugin(device);
}
void InferAPI1::read_network(const std::string &model) {
cnnNetwork = ie.ReadNetwork(model);
inputsInfo = cnnNetwork.getInputsInfo();
InferenceEngine::ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
for (const auto &input: inputsInfo) {
original_batch_size = shapes[input.first][0];
}
original_batch_size = original_batch_size ? original_batch_size : 1;
}
void InferAPI1::load_network(const std::string &device) {
exeNetwork = ie.LoadNetwork(cnnNetwork, device);
}
void InferAPI1::create_infer_request() {
inferRequest = exeNetwork.CreateInferRequest();
}
void InferAPI1::prepare_input() {
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
fillBlobs(inferRequest, exeNetwork.GetInputsInfo(), batchSize);
}
void InferAPI1::infer() {
inferRequest.Infer();
for (auto &output: outputInfo) {
InferenceEngine::Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
}
}
void InferAPI1::change_batch_size(int multiplier, int cur_iter) {
bool doReshape = false;
auto shapes = cnnNetwork.getInputShapes();
int new_batch_size = ((cur_iter % 2) == 0) ? original_batch_size * multiplier : original_batch_size;
for (const auto &input: inputsInfo) {
int batchIndex = -1;
auto layout = input.second->getTensorDesc().getLayout();
if ((layout == InferenceEngine::Layout::NCHW) || (layout == InferenceEngine::Layout::NCDHW) ||
(layout == InferenceEngine::Layout::NHWC) || (layout == InferenceEngine::Layout::NDHWC) ||
(layout == InferenceEngine::Layout::NC)) {
batchIndex = 0;
} else if (layout == InferenceEngine::CN) {
batchIndex = 1;
}
if (batchIndex != -1) {
shapes[input.first][batchIndex] = new_batch_size;
doReshape = true;
}
}
if (doReshape)
cnnNetwork.reshape(shapes);
else
throw std::logic_error("Reshape wasn't applied for a model.");
}
void InferAPI1::set_input_params(const std::string &model) {
cnnNetwork = ie.ReadNetwork(model);
InferenceEngine::InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
for (auto &input: inputInfo) {
input.second->getPreProcess().setResizeAlgorithm(InferenceEngine::NO_RESIZE);
input.second->setPrecision(InferenceEngine::Precision::U8);
if (input.second->getInputData()->getTensorDesc().getDims().size() == 4)
input.second->setLayout(InferenceEngine::Layout::NCHW);
else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2)
input.second->setLayout(InferenceEngine::Layout::NC);
else
throw std::logic_error("Setting of input parameters wasn't applied for a model.");
}
}
void InferAPI1::set_config(const std::string &device, const std::string &property, int nstreams) {
config[device + "_" + property] = std::to_string(nstreams);
ie.SetConfig(config, device);
}
unsigned int InferAPI1::get_property(const std::string &name) {
return exeNetwork.GetMetric(name).as<unsigned int>();
}
InferAPI2::InferAPI2() = default;
void InferAPI2::load_plugin(const std::string &device) {
ie.get_versions(device);
}
void InferAPI2::unload_plugin(const std::string &device) {
ie.unload_plugin(device);
}
void InferAPI2::read_network(const std::string &model) {
network = ie.read_model(model);
inputs = network->inputs();
for (const auto &input: inputs) {
auto tensor_shape = input.get_shape();
original_batch_size = tensor_shape[0];
original_batch_size = original_batch_size ? original_batch_size : 1;
}
}
void InferAPI2::load_network(const std::string &device) {
compiled_model = ie.compile_model(network, device);
}
void InferAPI2::create_infer_request() {
infer_request = compiled_model.create_infer_request();
}
void InferAPI2::prepare_input() {
fillTensors(infer_request, inputs);
}
void InferAPI2::infer() {
infer_request.infer();
for (size_t i = 0; i < outputs.size(); ++i) {
const auto &output_tensor = infer_request.get_output_tensor(i);
}
}
void InferAPI2::change_batch_size(int multiplier, int cur_iter) {
int new_batch_size = ((cur_iter % 2) == 0) ? original_batch_size * multiplier : original_batch_size;
for (auto &input: inputs) {
auto tensor_shape = input.get_shape();
tensor_shape[0] = new_batch_size;
network->reshape({{input.get_any_name(), tensor_shape}});
}
}
void InferAPI2::set_config(const std::string &device, const std::string &property, int nstreams) {
config[device + "_" + property] = std::to_string(nstreams);
ie.set_property(device, config);
}
unsigned int InferAPI2::get_property(const std::string &name) {
return compiled_model.get_property(name).as<unsigned int>();
}
void InferAPI2::set_input_params(const std::string &model) {
network = ie.read_model(model);
inputs = network->inputs();
auto ppp = ov::preprocess::PrePostProcessor(network);
for (size_t i = 0; i < inputs.size(); ++i) {
auto &input_info = ppp.input(i);
if (inputs[i].get_shape().size() == 4) {
input_info.tensor().set_element_type(ov::element::u8).set_layout("NCHW");
input_info.model().set_layout("NCHW");
ppp.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
} else if (inputs[i].get_shape().size() == 2) {
input_info.tensor().set_element_type(ov::element::u8).set_layout("NC");
input_info.model().set_layout("NC");
} else {
throw std::logic_error("Setting of input parameters wasn't applied for a model.");
}
}
network = ppp.build();
inputs = network->inputs();
}
std::shared_ptr<InferApiBase> create_infer_api_wrapper(const int &api_version) {
if (api_version == 1) {
return std::make_shared<InferAPI1>(InferAPI1());
} else if (api_version == 2) {
return std::make_shared<InferAPI2>(InferAPI2());
} else {
throw std::logic_error("Unsupported API version");
}
}
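
A minimal sketch of how the create_infer_api_wrapper factory is intended to be driven, mirroring infer_request_inference in common.cpp; the API version, device, and model path are placeholders:

#include "infer_api.h"

int main() {
    // 1 selects InferAPI1 (OV API 1.0), 2 selects InferAPI2 (OV API 2.0)
    auto wrapper = create_infer_api_wrapper(2);
    wrapper->load_plugin("CPU");            // placeholder device
    wrapper->read_network("model.xml");     // placeholder model path
    wrapper->load_network("CPU");
    wrapper->create_infer_request();
    wrapper->prepare_input();
    wrapper->infer();
    return 0;
}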


@ -0,0 +1,111 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <inference_engine.hpp>
#include "openvino/runtime/core.hpp"
#include "common_utils.h"
class InferApiBase {
public:
virtual void load_plugin(const std::string &device) = 0;
virtual void unload_plugin(const std::string &device) = 0;
virtual void read_network(const std::string &model) = 0;
virtual void load_network(const std::string &device) = 0;
virtual void create_infer_request() = 0;
virtual void infer() = 0;
virtual void prepare_input() = 0;
virtual void change_batch_size(int multiplier, int cur_iter) = 0;
virtual void set_input_params(const std::string &model) = 0;
virtual void set_config(const std::string &device, const std::string &property, int nstreams) = 0;
virtual unsigned int get_property(const std::string &name) = 0;
};
class InferAPI1 : public InferApiBase {
public:
InferAPI1();
void load_plugin(const std::string &device) override;
void unload_plugin(const std::string &device) override;
void read_network(const std::string &model) override;
void load_network(const std::string &device) override;
void create_infer_request() override;
void prepare_input() override;
void infer() override;
void change_batch_size(int multiplier, int cur_iter) override;
void set_input_params(const std::string &model) override;
void set_config(const std::string &device, const std::string &property, int nstreams) override;
unsigned int get_property(const std::string &name) override;
private:
InferenceEngine::Core ie;
InferenceEngine::CNNNetwork cnnNetwork;
InferenceEngine::ExecutableNetwork exeNetwork;
InferenceEngine::InferRequest inferRequest;
InferenceEngine::InputsDataMap inputsInfo;
InferenceEngine::OutputsDataMap outputInfo;
int original_batch_size;
std::map<std::string, std::string> config;
};
class InferAPI2 : public InferApiBase {
public:
InferAPI2();
void load_plugin(const std::string &device) override;
void unload_plugin(const std::string &device) override;
void read_network(const std::string &model) override;
void load_network(const std::string &device) override;
void create_infer_request() override;
void prepare_input() override;
void infer() override;
void change_batch_size(int multiplier, int cur_iter) override;
void set_input_params(const std::string &model) override;
void set_config(const std::string &device, const std::string &property, int nstreams) override;
unsigned int get_property(const std::string &name) override;
private:
ov::Core ie;
std::shared_ptr<ov::Model> network;
ov::CompiledModel compiled_model;
ov::InferRequest infer_request;
std::vector<ov::Output<ov::Node>> inputs;
std::vector<ov::Output<ov::Node>> outputs;
int original_batch_size;
std::map<std::string, ov::Any> config;
};
std::shared_ptr<InferApiBase> create_infer_api_wrapper(const int &api_version);


@ -19,28 +19,32 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
std::vector<TestCase> tests_cases;
const pugi::xml_document &test_config = Environment::Instance().getTestConfig();
std::vector<int> processes, threads, iterations;
std::vector<int> processes, threads, iterations, api_versions;
std::vector<std::string> devices, models, models_names, precisions;
pugi::xml_node values;
for (auto field = fields.begin(); field != fields.end(); field++) {
if (*field == "processes") {
for (const auto &field: fields) {
if (field == "processes") {
values = test_config.child("attributes").child("processes");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
processes.push_back(val.text().as_int());
} else if (*field == "threads") {
} else if (field == "threads") {
values = test_config.child("attributes").child("threads");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
threads.push_back(val.text().as_int());
} else if (*field == "iterations") {
} else if (field == "iterations") {
values = test_config.child("attributes").child("iterations");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
iterations.push_back(val.text().as_int());
} else if (*field == "devices") {
} else if (field == "devices") {
values = test_config.child("attributes").child("devices");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
devices.push_back(val.text().as_string());
} else if (*field == "models") {
devices.emplace_back(val.text().as_string());
} else if (field == "api_versions") {
values = test_config.child("attributes").child("api_versions");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling())
api_versions.push_back(val.text().as_int());
} else if (field == "models") {
values = test_config.child("attributes").child("models");
for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) {
std::string full_path = val.attribute("full_path").as_string();
@ -62,6 +66,7 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
processes = !processes.empty() ? processes : std::vector<int>{1};
threads = !threads.empty() ? threads : std::vector<int>{1};
iterations = !iterations.empty() ? iterations : std::vector<int>{1};
api_versions = !api_versions.empty() ? api_versions : std::vector<int>{1, 2};
devices = !devices.empty() ? devices : std::vector<std::string>{"NULL"};
models = !models.empty() ? models : std::vector<std::string>{"NULL"};
precisions = !precisions.empty() ? precisions : std::vector<std::string>{"NULL"};
@ -70,10 +75,11 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
for (auto &numprocesses: processes)
for (auto &numthreads: threads)
for (auto &numiters: iterations)
for (auto &api_version: api_versions)
for (auto &device: devices)
for (int i = 0; i < models.size(); i++)
tests_cases.push_back(TestCase(numprocesses, numthreads, numiters, device, models[i],
models_names[i], precisions[i]));
tests_cases.emplace_back(numprocesses, numthreads, numiters, api_version, device, models[i],
models_names[i], precisions[i]);
return tests_cases;
}
@ -93,6 +99,7 @@ std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks() {
numprocesses = device.attribute("processes").as_int(1);
numthreads = device.attribute("threads").as_int(1);
numiterations = device.attribute("iterations").as_int(1);
std::vector<int> api_versions{1, 2};
std::vector<std::map<std::string, std::string>> models;
@ -110,7 +117,9 @@ std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks() {
{"precision", precision}};
models.push_back(model_map);
}
tests_cases.push_back(MemLeaksTestCase(numprocesses, numthreads, numiterations, device_name, models));
for (auto api_version: api_versions) {
tests_cases.emplace_back(numprocesses, numthreads, numiterations, api_version, device_name, models);
}
}
return tests_cases;
@ -124,15 +133,16 @@ std::string getTestCaseNameMemLeaks(const testing::TestParamInfo<MemLeaksTestCas
return obj.param.test_case_name;
}
void test_wrapper(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
tests_pipeline(params.model, params.device, params.numiters);
void test_wrapper(const std::function<void(std::string, std::string, int, int)> &tests_pipeline,
const TestCase &params) {
tests_pipeline(params.model, params.device, params.numiters, params.api_version);
}
void _runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
void _runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params) {
run_in_threads(params.numthreads, test_wrapper, tests_pipeline, params);
}
void runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params) {
void runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params) {
#if DEBUG_MODE
tests_pipeline(params.model, params.device, params.numiters);
#else


@ -13,7 +13,9 @@
#include <vector>
enum TestStatus { TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK };
enum TestStatus {
TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK
};
using TestResult = std::pair<TestStatus, std::string>;
@ -22,6 +24,7 @@ public:
int numprocesses;
int numthreads;
int numiters;
int api_version;
std::string precision;
std::string test_case_name;
std::string model_name;
@ -29,10 +32,10 @@ public:
protected:
// Replace non-alphabetic/numeric symbols with "_" to prevent logging errors
std::string update_item_for_name(const std::string &item) {
static std::string update_item_for_name(const std::string &item) {
std::string _item(item);
for (std::string::size_type index = 0; index < _item.size(); ++index) {
if (!isalnum(_item[index]) && _item[index] != '_') _item[index] = '_';
for (char &index: _item) {
if (!isalnum(index) && index != '_') index = '_';
}
return _item;
}
@ -42,13 +45,15 @@ class TestCase : public TestCaseBase {
public:
std::string model;
TestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device, const std::string &_model,
TestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device,
const std::string &_model,
const std::string &_model_name, const std::string &_precision) {
numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device, model = _model,
model_name = _model_name, precision = _precision;
numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version,
device = _device, model = _model, model_name = _model_name, precision = _precision;
test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) +
"_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) +
"_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name);
"_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name)
+ "_API_" + std::to_string(api_version);
}
};
@ -56,12 +61,13 @@ class MemLeaksTestCase : public TestCaseBase {
public:
std::vector<std::map<std::string, std::string>> models;
MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device,
MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device,
std::vector<std::map<std::string, std::string>> _models) {
numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device,
models = _models;
numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version,
device = _device, models = _models;
test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) +
"_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device);
"_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) + "_API_" +
std::to_string(api_version);
for (int i = 0; i < models.size(); i++) {
test_case_name += "_Model" + std::to_string(i + 1) + "_" + update_item_for_name(models[i]["name"]) + "_" +
update_item_for_name(models[i]["precision"]);
@ -76,7 +82,9 @@ private:
bool _collect_results_only = false;
Environment() = default;
Environment(const Environment &) = delete;
Environment &operator=(const Environment &) = delete;
public:
@ -86,14 +94,21 @@ public:
}
const pugi::xml_document &getTestConfig();
void setTestConfig(const pugi::xml_document &test_config);
};
std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> items);
std::vector<MemLeaksTestCase> generateTestsParamsMemLeaks();
std::string getTestCaseName(const testing::TestParamInfo<TestCase> &obj);
std::string getTestCaseNameMemLeaks(const testing::TestParamInfo<MemLeaksTestCase> &obj);
void runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
void _runTest(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
void test_wrapper(const std::function<void(std::string, std::string, int)> &tests_pipeline, const TestCase &params);
void runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params);
void _runTest(const std::function<void(std::string, std::string, int, int)> &tests_pipeline, const TestCase &params);
void test_wrapper(const std::function<void(std::string, std::string, int, int)> &tests_pipeline,
const TestCase &params);


@ -4,6 +4,7 @@
#include "tests_utils.h"
#include "../common/tests_utils.h"
#include "../common/infer_api/infer_api.h"
#include "common_utils.h"
#include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h"
@ -11,8 +12,7 @@
#include <gtest/gtest.h>
#include <inference_engine.hpp>
using namespace InferenceEngine;
#include <openvino/runtime/core.hpp>
class MemCheckTestSuite : public ::testing::TestWithParam<TestCase> {
@ -23,7 +23,6 @@ public:
void SetUp() override {
const ::testing::TestInfo *const test_info = ::testing::UnitTest::GetInstance()->current_test_info();
test_name = std::string(test_info->name()).substr(0, std::string(test_info->name()).find('/'));
//const std::string full_test_name = std::string(test_info->test_case_name()) + "." + std::string(test_info->name());
const auto &test_params = GetParam();
model = test_params.model;
@ -48,17 +47,15 @@ TEST_P(MemCheckTestSuite, create_exenetwork) {
log_info("Create ExecutableNetwork from network: \"" << model
<< "\" with precision: \"" << precision
<< "\" for device: \"" << device << "\"");
auto test_pipeline = [&]{
auto test_params = GetParam();
MemCheckPipeline memCheckPipeline;
Core ie;
ie.GetVersions(device);
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
auto test_pipeline = [&] {
auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
ie_api_wrapper->load_plugin(device);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(device);
log_info("Memory consumption after LoadNetwork:");
memCheckPipeline.record_measures(test_name);
log_debug(memCheckPipeline.get_reference_record_for_test(test_name, model_name, precision, device));
return memCheckPipeline.measure();
};
@ -71,25 +68,16 @@ TEST_P(MemCheckTestSuite, infer_request_inference) {
log_info("Inference of InferRequest from network: \"" << model
<< "\" with precision: \"" << precision
<< "\" for device: \"" << device << "\"");
auto test_pipeline = [&]{
auto test_params = GetParam();
MemCheckPipeline memCheckPipeline;
Core ie;
ie.GetVersions(device);
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
InferRequest inferRequest = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(inferRequest, inputsInfo, batchSize);
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
auto test_pipeline = [&] {
auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
ie_api_wrapper->load_plugin(device);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
log_info("Memory consumption after Inference:");
memCheckPipeline.record_measures(test_name);
@ -108,6 +96,7 @@ INSTANTIATE_TEST_SUITE_P(MemCheckTests, MemCheckTestSuite,
getTestCaseName);
TEST_P(MemCheckTestSuite, inference_with_streams) {
auto test_params = GetParam();
const auto nstreams = 2;
log_info("Inference of InferRequest from network: \"" << model
<< "\" with precision: \"" << precision
@ -118,37 +107,23 @@ TEST_P(MemCheckTestSuite, inference_with_streams) {
auto test_pipeline = [&] {
MemCheckPipeline memCheckPipeline;
std::map<std::string, std::string> config;
const std::string key = device + "_THROUGHPUT_STREAMS";
config[device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams);
Core ie;
ie.GetVersions(device);
ie.SetConfig(config, device);
InferRequest inferRequest;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
unsigned int nireq = nstreams;
auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version);
ie_api_wrapper->load_plugin(device);
ie_api_wrapper->set_config(device, "THROUGHPUT_STREAMS", nstreams);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(device);
try {
nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
} catch (const std::exception &ex) {
log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
}
for (int counter = 0; counter < nireq; counter++) {
inferRequest = exeNetwork.CreateInferRequest();
fillBlobs(inferRequest, inputsInfo, batchSize);
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = inferRequest.GetBlob(output.first);
for (int counter = 0; counter < nireq; counter++) {
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
log_info("Memory consumption after Inference with streams: \"" << nstreams


@ -28,6 +28,7 @@ private:
* @brief Measures values at the current point of time
*/
std::array<long, MeasureValueMax> _measure();
public:
/**
* @brief Constructs MemCheckPipeline object and


@ -10,7 +10,9 @@
#include <pugixml.hpp>
// Measure values
enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax };
enum MeasureValue {
VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax
};
// Measure values headers
const std::array<std::string, MeasureValueMax> MeasureValueHeader{"VMRSS", "VMHWM", "VMSIZE", "VMPEAK", "THREADS"};
@ -35,9 +37,13 @@ namespace util {
class MemCheckEnvironment {
private:
pugi::xml_document _refs_config;
MemCheckEnvironment() = default;
MemCheckEnvironment(const MemCheckEnvironment &) = delete;
MemCheckEnvironment &operator=(const MemCheckEnvironment &) = delete;
public:
static MemCheckEnvironment &Instance() {
static MemCheckEnvironment env;


@ -3,21 +3,22 @@
//
#include "../common/managers/thread_manager.h"
#include "../common/tests_utils.h"
#include "common_utils.h"
#include "../common/infer_api/infer_api.h"
#include "tests_pipelines/tests_pipelines.h"
#include <inference_engine.hpp>
#include <gtest/gtest.h>
using namespace InferenceEngine;
class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam<MemLeaksTestCase> {};
class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam<MemLeaksTestCase> {
};
class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam<MemLeaksTestCase> {};
class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam<MemLeaksTestCase> {
};
class MemLeaksTestSuite : public ::testing::TestWithParam<MemLeaksTestCase> {};
class MemLeaksTestSuite : public ::testing::TestWithParam<MemLeaksTestCase> {
};
inline void test_runner(int numthreads, const std::function<TestResult()> &test_function) {
ThreadManager<TestResult> thr_manager;
@ -38,7 +39,7 @@ inline void test_runner(int numthreads, const std::function<TestResult()> &test_
TEST_P(MemLeaksTestSuiteNoModel, load_unload_plugin) {
auto test_params = GetParam();
std::vector<std::function<void()>> pipeline = {load_unload_plugin(test_params.device)};
std::vector<std::function<void()>> pipeline = {load_unload_plugin(test_params.device, test_params.api_version)};
auto test = [&] {
log_info("Load/unload plugin for \"" << test_params.device << "\" device"
<< " for " << test_params.numiters << " times");
@ -51,8 +52,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, read_network) {
auto test_params = GetParam();
std::vector<std::function<void()>> pipeline;
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"]));
pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"], test_params.api_version));
}
auto test = [&] {
log_info("Read networks: " << test_params.model_name << " for " << test_params.numiters << " times");
@ -65,8 +67,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, cnnnetwork_reshape_batch_x2) {
auto test_params = GetParam();
std::vector<std::function<void()>> pipeline;
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"]));
pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"], i, test_params.api_version));
}
auto test = [&] {
log_info("Reshape to batch*=2 of CNNNetworks created from networks: " << test_params.model_name << " for "
@ -80,8 +83,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) {
auto test_params = GetParam();
std::vector<std::function<void()>> pipeline;
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(set_input_params(test_params.models[i]["full_path"]));
pipeline.push_back(set_input_params(test_params.models[i]["full_path"], test_params.api_version));
}
auto test = [&] {
log_info("Apply preprocessing for CNNNetworks from networks: " << test_params.model_name << " for "
@ -93,11 +97,13 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) {
TEST_P(MemLeaksTestSuite, recreate_exenetwork) {
auto test_params = GetParam();
Core ie;
std::vector<std::function<void()>> pipeline;
auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(recreate_exenetwork(ie, test_params.models[i]["full_path"], test_params.device));
pipeline.push_back(recreate_compiled_model(ie_wrapper, test_params.models[i]["full_path"], test_params.device,
test_params.api_version));
}
auto test = [&] {
log_info("Recreate ExecutableNetworks within existing InferenceEngine::Core from networks: "
@ -110,19 +116,17 @@ TEST_P(MemLeaksTestSuite, recreate_exenetwork) {
TEST_P(MemLeaksTestSuite, recreate_infer_request) {
auto test_params = GetParam();
Core ie;
std::vector<std::function<void()>> pipeline;
std::vector<ExecutableNetwork> exeNetworks;
auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
int n_models = test_params.models.size();
exeNetworks.reserve(n_models);
size_t n_models = test_params.models.size();
for (int i = 0; i < n_models; i++) {
CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
exeNetworks.push_back(exeNetwork);
pipeline.push_back(recreate_infer_request(exeNetworks[i]));
ie_wrapper->read_network(test_params.models[i]["full_path"]);
ie_wrapper->load_network(test_params.device);
pipeline.push_back(recreate_infer_request(ie_wrapper));
}
auto test = [&] {
log_info("Create InferRequests from networks: " << test_params.model_name << " for \"" << test_params.device
<< "\" device for " << test_params.numiters << " times");
@ -133,28 +137,18 @@ TEST_P(MemLeaksTestSuite, recreate_infer_request) {
TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
auto test_params = GetParam();
Core ie;
std::vector<std::function<void()>> pipeline;
std::vector<InferRequest> infer_requests;
std::vector<OutputsDataMap> outputs_info;
int n_models = test_params.models.size();
infer_requests.reserve(n_models);
outputs_info.reserve(n_models);
auto ie_wrapper = create_infer_api_wrapper(test_params.api_version);
size_t n_models = test_params.models.size();
for (int i = 0; i < n_models; i++) {
CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
infer_requests.push_back(infer_request);
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
outputs_info.push_back(output_info);
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_requests[i], inputsInfo, batchSize);
pipeline.push_back(reinfer_request_inference(infer_requests[i], outputs_info[i]));
ie_wrapper->read_network(test_params.models[i]["full_path"]);
ie_wrapper->load_network(test_params.device);
ie_wrapper->create_infer_request();
ie_wrapper->prepare_input();
pipeline.push_back(reinfer_request_inference(ie_wrapper));
}
auto test = [&] {
log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""
<< test_params.device << "\" device for "
@ -167,8 +161,10 @@ TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
TEST_P(MemLeaksTestSuite, infer_request_inference) {
auto test_params = GetParam();
std::vector<std::function<void()>> pipeline;
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device));
pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device,
test_params.api_version));
}
auto test = [&] {
log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""
@ -183,8 +179,10 @@ TEST_P(MemLeaksTestSuite, inference_with_streams) {
auto test_params = GetParam();
const auto nstreams = 2;
std::vector<std::function<void()>> pipeline;
pipeline.reserve(test_params.models.size());
for (int i = 0; i < test_params.models.size(); i++) {
pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams));
pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams,
test_params.api_version));
}
auto test = [&] {
log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \""


@ -28,7 +28,9 @@ using namespace InferenceEngine;
#define THRESHOLD 0.1
// Measure values
enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax };
enum MeasureValue {
VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax
};
namespace util {
template<typename In, typename Out, typename Func>
@ -42,7 +44,7 @@ namespace util {
}
}// namespace util
TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline, const int &n) {
TestResult common_test_pipeline(const std::vector<std::function<void()>> &test_pipeline, const int &n) {
if (AVERAGE_NUM > n)
return TestResult(TestStatus::TEST_FAILED, "Test failed: number of iterations less than defined AVERAGE_NUM");
@ -65,7 +67,7 @@ TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline
for (size_t iteration = 1, measure_count = n / AVERAGE_NUM;; iteration++) {
// run test pipeline and collect metrics
for (auto step : test_pipeline) step();
for (const auto &step: test_pipeline) step();
getVmValues(cur[VMSIZE], cur[VMPEAK], cur[VMRSS], cur[VMHWM]);
cur[THREADS] = getThreadsNum();


@ -13,5 +13,5 @@
#include <inference_engine.hpp>
// tests_pipelines/tests_pipelines.cpp
TestResult common_test_pipeline(std::vector<std::function<void()>> test_pipeline, const int &n);
TestResult common_test_pipeline(const std::vector<std::function<void()>> &test_pipeline, const int &n);
// tests_pipelines/tests_pipelines.cpp


@ -129,6 +129,9 @@ def main():
parser.add_argument('--comparison_report',
required=args.compare,
help='create comparison report file name')
parser.add_argument('--gtest_filter',
default='',
help='gtest filter passed to gtest')
args = parser.parse_args()
@ -150,6 +153,7 @@ def main():
'--output_dir', f'{args.output_dir}',
'--workers', f'{args.workers}',
'--timeout', f'{args.timeout}',
'--gtest_filter', f'{args.gtest_filter}',
args.binary, '--'] + binary_args)
if args.upload or args.timeline_report or args.compare:


@ -33,8 +33,8 @@ TEST_P(UnitTestSuiteNoDevice, set_input_params) {
runTest(test_set_input_params, GetParam());
}
TEST_P(UnitTestSuite, create_exenetwork) {
runTest(test_create_exenetwork, GetParam());
TEST_P(UnitTestSuite, create_compiled_model) {
runTest(test_create_compiled_model, GetParam());
}
TEST_P(UnitTestSuite, create_infer_request) {
@ -78,13 +78,16 @@ TEST_P(UnitTestSuite, infer_request_inference_full_pipeline) {
// tests_pipelines/tests_pipelines_full_pipeline.cpp
INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoModel,
::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices"})),
::testing::ValuesIn(generateTestsParams(
{"processes", "threads", "iterations", "devices", "api_versions"})),
getTestCaseName);
INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoDevice,
::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "models"})),
::testing::ValuesIn(generateTestsParams(
{"processes", "threads", "iterations", "models", "api_versions"})),
getTestCaseName);
INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuite,
::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices", "models"})),
::testing::ValuesIn(generateTestsParams(
{"processes", "threads", "iterations", "devices", "models", "api_versions"})),
getTestCaseName);


@ -11,75 +11,84 @@
using namespace InferenceEngine;
void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n) {
void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Load/unload plugin for device: " << target_device << " for " << n << " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
load_unload_plugin(target_device)();
load_unload_plugin(target_device, api_version)();
}
}
void test_read_network(const std::string &model, const std::string &target_device, const int &n) {
void test_read_network(const std::string &model, const std::string &target_device, const int &n, const int &api_version) {
log_info("Read network: \"" << model << "\" for " << n << " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
read_cnnnetwork(model)();
read_cnnnetwork(model, api_version)();
}
}
void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n) {
void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
cnnnetwork_reshape_batch_x2(model)();
cnnnetwork_reshape_batch_x2(model, i, api_version)();
}
}
void test_set_input_params(const std::string &model, const std::string &target_device, const int &n) {
void test_set_input_params(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
set_input_params(model)();
set_input_params(model, api_version)();
}
}
void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n) {
void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Create ExecutableNetwork from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
create_exenetwork(model, target_device)();
create_compiled_model(model, target_device, api_version)();
}
}
void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n) {
void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Create InferRequest from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
create_infer_request(model, target_device)();
create_infer_request(model, target_device, api_version)();
}
}
void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n) {
void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Inference of InferRequest from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
infer_request_inference(model, target_device)();
infer_request_inference(model, target_device, api_version)();
}
}


@@ -10,21 +10,47 @@
#include <string>
// tests_pipelines/tests_pipelines.cpp
void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n);
void test_read_network(const std::string &model, const std::string &target_device, const int &n);
void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n);
void test_set_input_params(const std::string &model, const std::string &target_device, const int &n);
void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n);
void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n);
void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n);
void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_read_network(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_set_input_params(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
// tests_pipelines/tests_pipelines.cpp
// tests_pipelines/tests_pipelines_full_pipeline.cpp
void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n);
void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device,
const int &n, const int &api_version);
void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version);
void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device,
const int &n, const int &api_version);
// tests_pipelines/tests_pipelines_full_pipeline.cpp
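All of the `api_version`-aware pipelines above funnel into a wrapper created by `create_infer_api_wrapper()`. Its definition is elsewhere in this PR; the interface below is a sketch reconstructed from the calls visible in this diff, so everything beyond the method names should be treated as an assumption:

```cpp
#include <memory>
#include <string>

// Assumed shape of the API-abstraction layer used by the pipelines.
class InferApiBase {
public:
    virtual ~InferApiBase() = default;
    virtual void load_plugin(const std::string& device) = 0;
    virtual void unload_plugin(const std::string& device) = 0;
    virtual void read_network(const std::string& model) = 0;
    virtual void load_network(const std::string& device) = 0;
    virtual void create_infer_request() = 0;
    virtual void prepare_input() = 0;
    virtual void infer() = 0;
    virtual void change_batch_size(int multiplier, int iteration) = 0;
    virtual void set_input_params(const std::string& model) = 0;
};

// api_version == 1 -> Inference Engine implementation, 2 -> OV 2.0 implementation.
std::shared_ptr<InferApiBase> create_infer_api_wrapper(const int& api_version);
```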


@@ -3,13 +3,10 @@
//
#include "tests_pipelines.h"
#include "common_utils.h"
#include <string>
#include <openvino/core/preprocess/pre_post_process.hpp>
#include <inference_engine.hpp>
using namespace InferenceEngine;
#define batchIndex 0
@@ -34,247 +31,126 @@ using namespace InferenceEngine;
else \
throw std::logic_error("Reshape wasn't applied for a model.");
void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
log_info("Load/unload plugin for device: " << target_device << " for " << n << " times");
Core ie;
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
// GetVersions silently register plugin in `plugins` through `GetCPPPluginByName`
ie.GetVersions(target_device);
// get_versions silently register plugin in `plugins` through `GetCPPPluginByName`
ie_api_wrapper->load_plugin(target_device);
// Remove plugin for target_device from `plugins`
ie.UnregisterPlugin(target_device);
ie_api_wrapper->unload_plugin(target_device);
}
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
setInputParameters();
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
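For comparison with the removed Inference Engine code, a minimal OV 2.0 pipeline covering the same wrapper steps might look as follows (zero-filled tensors stand in for the wrapper's real `prepare_input()`, and static input shapes are assumed):

```cpp
#include <memory>
#include <string>

#include <openvino/openvino.hpp>

void full_pipeline_sketch(const std::string& model_path, const std::string& device) {
    ov::Core core;
    std::shared_ptr<ov::Model> model = core.read_model(model_path);  // read_network()
    ov::CompiledModel compiled = core.compile_model(model, device);  // load_network()
    ov::InferRequest request = compiled.create_infer_request();      // create_infer_request()
    for (const auto& input : compiled.inputs()) {                    // prepare_input()
        ov::Tensor tensor(input.get_element_type(), input.get_shape());
        request.set_tensor(input, tensor);
    }
    request.infer();                                                 // infer()
}
```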
void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Read network: \"" << model << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork;
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
cnnNetwork = ie.ReadNetwork(model);
ie_api_wrapper->read_network(model);
}
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
setInputParameters();
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
for (auto &input : inputInfo) {
setInputParameters();
ie_api_wrapper->set_input_params(model);
}
}
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
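The newly added `openvino/core/preprocess/pre_post_process.hpp` include suggests that the OV 2.0 branch of `set_input_params()` relies on `ov::preprocess::PrePostProcessor`. A hypothetical sketch, with U8 input precision chosen purely for illustration:

```cpp
#include <memory>

#include <openvino/core/preprocess/pre_post_process.hpp>
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> set_input_params_sketch(std::shared_ptr<ov::Model> model) {
    ov::preprocess::PrePostProcessor ppp(model);
    for (size_t i = 0; i < model->inputs().size(); ++i) {
        // Declare the user-facing tensor type and convert it back for the model.
        ppp.input(i).tensor().set_element_type(ov::element::u8);
        ppp.input(i).preprocess().convert_element_type(ov::element::f32);
    }
    return ppp.build();
}
```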
void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device,
const int &n, const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
for (auto &input : inputInfo) {
setInputParameters();
}
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
int prev_batch = -1, new_batch;
for (auto &input : inputInfo) {
auto layout = input.second->getTensorDesc().getLayout();
if ((layout == Layout::NCHW) || (layout == Layout::NC))
prev_batch = shapes[input.first][batchIndex];
}
if (prev_batch == -1)
throw std::logic_error("Reshape wasn't applied for a model.");
ie_api_wrapper->read_network(model);
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
new_batch = ((i % 2) == 0) ? prev_batch * 2 : prev_batch;
for (auto &input : inputInfo) {
auto layout = input.second->getTensorDesc().getLayout();
if ((layout == Layout::NCHW) || (layout == Layout::NC)) {
shapes[input.first][batchIndex] = new_batch;
doReshape = true;
ie_api_wrapper->change_batch_size(2, i);
}
}
reshapeCNNNetwork();
}
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
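`change_batch_size(2, i)` replaces the explicit shape bookkeeping removed above. A hedged guess at its OV 2.0 behaviour, doubling the batch on even iterations and restoring it on odd ones (batch assumed to be dimension 0, original shapes assumed to be cached by the caller):

```cpp
#include <map>
#include <memory>

#include <openvino/openvino.hpp>

void change_batch_size_sketch(const std::shared_ptr<ov::Model>& model,
                              const std::map<ov::Output<ov::Node>, ov::PartialShape>& original_shapes,
                              int multiplier, int iteration) {
    std::map<ov::Output<ov::Node>, ov::PartialShape> new_shapes;
    for (const auto& item : original_shapes) {
        ov::PartialShape shape = item.second;
        if ((iteration % 2) == 0)
            shape[0] = shape[0] * multiplier;  // even iteration: batch *= multiplier
        new_shapes[item.first] = shape;        // odd iteration: original batch restored
    }
    model->reshape(new_shapes);
}
```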
void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Create ExecutableNetwork from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
setInputParameters();
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork;
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
ie_api_wrapper->read_network(model);
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
ie_api_wrapper->load_network(target_device);
}
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
ie_api_wrapper->infer();
}
void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n,
const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Create InferRequest from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
setInputParameters();
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request;
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
infer_request = exeNetwork.CreateInferRequest();
fillBlobs(infer_request, inputsInfo, batchSize);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
}
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->infer();
}
void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n) {
void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device,
const int &n, const int &api_version) {
auto ie_api_wrapper = create_infer_api_wrapper(api_version);
log_info("Inference of InferRequest from network: \"" << model
<< "\" for device: \"" << target_device << "\" for " << n << " times");
Core ie;
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes();
bool doReshape = false;
for (auto &input : inputInfo) {
setInputParameters();
computeShapesToReshape();
}
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
auto batchSize = cnnNetwork.getBatchSize();
batchSize = batchSize != 0 ? batchSize : 1;
const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
fillBlobs(infer_request, inputsInfo, batchSize);
<< "\" for device: \"" << target_device << "\" for " << n
<< " times");
ie_api_wrapper->read_network(model);
ie_api_wrapper->load_network(target_device);
ie_api_wrapper->create_infer_request();
ie_api_wrapper->prepare_input();
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
ie_api_wrapper->infer();
}
}
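Finally, the removed code fetched every output blob after `Infer()`; the OV 2.0 wrapper's `infer()` is presumed to do the equivalent, roughly:

```cpp
#include <openvino/openvino.hpp>

void infer_and_touch_outputs(ov::InferRequest& request, const ov::CompiledModel& compiled) {
    request.infer();
    for (size_t i = 0; i < compiled.outputs().size(); ++i) {
        ov::Tensor output = request.get_output_tensor(i);  // mirrors GetBlob(output.first)
        (void)output.get_size();                           // touch the result so it is not optimized away
    }
}
```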