diff --git a/tests/lib/src/common_utils.cpp b/tests/lib/src/common_utils.cpp index 21776845d1e..a397866a440 100644 --- a/tests/lib/src/common_utils.cpp +++ b/tests/lib/src/common_utils.cpp @@ -5,54 +5,84 @@ #include "common_utils.h" #include +#include -using namespace InferenceEngine; /** * @brief Fill InferRequest blobs with random values or image information */ void fillBlobs(InferenceEngine::InferRequest inferRequest, - const InferenceEngine::ConstInputsDataMap& inputsInfo, - const size_t& batchSize) { - std::vector> input_image_sizes; - for (const ConstInputsDataMap::value_type& item : inputsInfo) { - if (isImage(item.second)) - input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc())); - } + const InferenceEngine::ConstInputsDataMap &inputsInfo, + const size_t &batchSize) { + std::vector> input_image_sizes; + for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) { + if (isImage(item.second)) + input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc())); + } - for (const ConstInputsDataMap::value_type& item : inputsInfo) { - Blob::Ptr inputBlob = inferRequest.GetBlob(item.first); - if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) { - // Fill image information - auto image_size = input_image_sizes.at(0); - if (item.second->getPrecision() == InferenceEngine::Precision::FP32) { - fillBlobImInfo(inputBlob, batchSize, image_size); - } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) { - fillBlobImInfo(inputBlob, batchSize, image_size); - } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) { - fillBlobImInfo(inputBlob, batchSize, image_size); - } else { - throw std::logic_error("Input precision is not supported for image info!"); - } - continue; + for (const InferenceEngine::ConstInputsDataMap::value_type &item: inputsInfo) { + InferenceEngine::Blob::Ptr inputBlob = inferRequest.GetBlob(item.first); + if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) { + // Fill image information + auto image_size = input_image_sizes.at(0); + if (item.second->getPrecision() == InferenceEngine::Precision::FP32) { + fillBlobImInfo(inputBlob, batchSize, image_size); + } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) { + fillBlobImInfo(inputBlob, batchSize, image_size); + } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) { + fillBlobImInfo(inputBlob, batchSize, image_size); + } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) { + fillBlobImInfo(inputBlob, batchSize, image_size); + } else { + throw std::logic_error("Input precision is not supported for image info!"); + } + continue; + } + // Fill random + if (item.second->getPrecision() == InferenceEngine::Precision::FP32) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) { + fillBlobRandom(inputBlob); + } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) { + fillBlobRandom(inputBlob); + } else { + throw std::logic_error("Input precision is not 
supported for " + item.first); + } } - // Fill random - if (item.second->getPrecision() == InferenceEngine::Precision::FP32) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) { - fillBlobRandom(inputBlob); - } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) { - fillBlobRandom(inputBlob); - } else { - throw std::logic_error("Input precision is not supported for " + item.first); +} + +/** + * @brief Fill infer_request tensors with random values or image information + */ +void fillTensors(ov::InferRequest &infer_request, const std::vector> &inputs) { + for (size_t i = 0; i < inputs.size(); ++i) { + ov::Tensor input_tensor; + if (inputs[i].get_element_type() == ov::element::f32) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::f16) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::i32) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::u8) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::i8) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::u16) { + input_tensor = fillTensorRandom(inputs[i]); + } else if (inputs[i].get_element_type() == ov::element::i16) { + input_tensor = fillTensorRandom(inputs[i]); + } else { + throw std::logic_error( + "Input precision is not supported for " + inputs[i].get_element_type().get_type_name()); + } + infer_request.set_input_tensor(i, input_tensor); } - } } \ No newline at end of file diff --git a/tests/lib/src/common_utils.h b/tests/lib/src/common_utils.h index 6d1f669572c..f05d0a12413 100644 --- a/tests/lib/src/common_utils.h +++ b/tests/lib/src/common_utils.h @@ -5,103 +5,118 @@ #pragma once #include +#include -using namespace InferenceEngine; /** - * @brief Determine if InferenceEngine blob means image or not + * @brief Determine if InferenceEngine blob means image or not (OV API 1.0) */ template static bool isImage(const T &blob) { - auto descriptor = blob->getTensorDesc(); - if (descriptor.getLayout() != InferenceEngine::NCHW) { - return false; - } - auto channels = descriptor.getDims()[1]; - return channels == 3; + auto descriptor = blob->getTensorDesc(); + if (descriptor.getLayout() != InferenceEngine::NCHW) { + return false; + } + auto channels = descriptor.getDims()[1]; + return channels == 3; } /** - * @brief Determine if InferenceEngine blob means image information or not + * @brief Determine if InferenceEngine blob means image information or not (OV API 1.0) */ template static bool isImageInfo(const T &blob) { - auto descriptor = blob->getTensorDesc(); - if (descriptor.getLayout() != InferenceEngine::NC) { - return false; - } - auto channels = descriptor.getDims()[1]; - return (channels >= 2); + auto descriptor = blob->getTensorDesc(); + if (descriptor.getLayout() != InferenceEngine::NC) { + return false; + } + auto channels = descriptor.getDims()[1]; + return (channels 
>= 2); } /** - * @brief Return height and width from provided InferenceEngine tensor description + * @brief Return height and width from provided InferenceEngine tensor description (OV API 1) */ -inline std::pair getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) { - const auto& layout = desc.getLayout(); - const auto& dims = desc.getDims(); - const auto& size = dims.size(); - if ((size >= 2) && - (layout == InferenceEngine::Layout::NCHW || - layout == InferenceEngine::Layout::NHWC || - layout == InferenceEngine::Layout::NCDHW || - layout == InferenceEngine::Layout::NDHWC || - layout == InferenceEngine::Layout::OIHW || - layout == InferenceEngine::Layout::GOIHW || - layout == InferenceEngine::Layout::OIDHW || - layout == InferenceEngine::Layout::GOIDHW || - layout == InferenceEngine::Layout::CHW || - layout == InferenceEngine::Layout::HW)) { - // Regardless of layout, dimensions are stored in fixed order - return std::make_pair(dims.back(), dims.at(size - 2)); - } else { - throw std::logic_error("Tensor does not have height and width dimensions"); - } +inline std::pair getTensorHeightWidth(const InferenceEngine::TensorDesc &desc) { + const auto &layout = desc.getLayout(); + const auto &dims = desc.getDims(); + const auto &size = dims.size(); + if ((size >= 2) && + (layout == InferenceEngine::Layout::NCHW || + layout == InferenceEngine::Layout::NHWC || + layout == InferenceEngine::Layout::NCDHW || + layout == InferenceEngine::Layout::NDHWC || + layout == InferenceEngine::Layout::OIHW || + layout == InferenceEngine::Layout::GOIHW || + layout == InferenceEngine::Layout::OIDHW || + layout == InferenceEngine::Layout::GOIDHW || + layout == InferenceEngine::Layout::CHW || + layout == InferenceEngine::Layout::HW)) { + // Regardless of layout, dimensions are stored in fixed order + return std::make_pair(dims.back(), dims.at(size - 2)); + } else { + throw std::logic_error("Tensor does not have height and width dimensions"); + } } + /** * @brief Fill InferenceEngine blob with random values */ template -void fillBlobRandom(Blob::Ptr& inputBlob) { - MemoryBlob::Ptr minput = as(inputBlob); - // locked memory holder should be alive all time while access to its buffer happens - auto minputHolder = minput->wmap(); +void fillBlobRandom(InferenceEngine::Blob::Ptr &inputBlob) { + auto minput = InferenceEngine::as(inputBlob); + // locked memory holder should be alive all time while access to its buffer happens + auto minputHolder = minput->wmap(); - auto inputBlobData = minputHolder.as(); - for (size_t i = 0; i < inputBlob->size(); i++) { - auto rand_max = RAND_MAX; - inputBlobData[i] = (T) rand() / static_cast(rand_max) * 10; - } + auto inputBlobData = minputHolder.as(); + for (size_t i = 0; i < inputBlob->size(); i++) { + auto rand_max = RAND_MAX; + inputBlobData[i] = (T) rand() / static_cast(rand_max) * 10; + } +} + +/** + * @brief Fill InferenceEngine tensor with random values (OV API 2.0) + */ +template +ov::Tensor fillTensorRandom(const ov::Output &input) { + ov::Tensor tensor{input.get_element_type(), input.get_shape()}; + std::vector values(ov::shape_size(input.get_shape())); + for (size_t i = 0; i < values.size(); ++i) { + values[i] = 1 + static_cast (rand()) / (static_cast (RAND_MAX / (std::numeric_limits::max() - 1))); + } + std::memcpy(tensor.data(), values.data(), sizeof(T) * values.size()); + return tensor; } /** - * @brief Fill InferenceEngine blob with image information + * @brief Fill InferenceEngine blob with image information (OV API 1.0) */ template -void 
fillBlobImInfo(Blob::Ptr& inputBlob, - const size_t& batchSize, - std::pair image_size) { - MemoryBlob::Ptr minput = as(inputBlob); - // locked memory holder should be alive all time while access to its buffer happens - auto minputHolder = minput->wmap(); +void fillBlobImInfo(InferenceEngine::Blob::Ptr &inputBlob, + const size_t &batchSize, + std::pair image_size) { + InferenceEngine::MemoryBlob::Ptr minput = InferenceEngine::as(inputBlob); + // locked memory holder should be alive all time while access to its buffer happens + auto minputHolder = minput->wmap(); - auto inputBlobData = minputHolder.as(); - for (size_t b = 0; b < batchSize; b++) { - size_t iminfoSize = inputBlob->size()/batchSize; - for (size_t i = 0; i < iminfoSize; i++) { - size_t index = b*iminfoSize + i; - if (0 == i) - inputBlobData[index] = static_cast(image_size.first); - else if (1 == i) - inputBlobData[index] = static_cast(image_size.second); - else - inputBlobData[index] = 1; + auto inputBlobData = minputHolder.as(); + for (size_t b = 0; b < batchSize; b++) { + size_t iminfoSize = inputBlob->size() / batchSize; + for (size_t i = 0; i < iminfoSize; i++) { + size_t index = b * iminfoSize + i; + if (0 == i) + inputBlobData[index] = static_cast(image_size.first); + else if (1 == i) + inputBlobData[index] = static_cast(image_size.second); + else + inputBlobData[index] = 1; + } } - } } @@ -109,5 +124,11 @@ void fillBlobImInfo(Blob::Ptr& inputBlob, * @brief Fill InferRequest blobs with random values or image information */ void fillBlobs(InferenceEngine::InferRequest inferRequest, - const InferenceEngine::ConstInputsDataMap& inputsInfo, - const size_t& batchSize); \ No newline at end of file + const InferenceEngine::ConstInputsDataMap &inputsInfo, + const size_t &batchSize); + +/** + * @brief Fill InferRequest tensors with random values or image information + */ +void fillTensors(ov::InferRequest &infer_request, + const std::vector> &inputs); diff --git a/tests/stress_tests/.automation/memcheck_tests/nightly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/nightly_configs/desktop_test_config.xml index 0cde41da1b7..a4456881c73 100644 --- a/tests/stress_tests/.automation/memcheck_tests/nightly_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/memcheck_tests/nightly_configs/desktop_test_config.xml @@ -3,6 +3,10 @@ CPU GPU + + 1 + 2 + diff --git a/tests/stress_tests/.automation/memcheck_tests/nightly_configs/myriad_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/nightly_configs/myriad_test_config.xml index 6e8337c369b..de11477f588 100644 --- a/tests/stress_tests/.automation/memcheck_tests/nightly_configs/myriad_test_config.xml +++ b/tests/stress_tests/.automation/memcheck_tests/nightly_configs/myriad_test_config.xml @@ -2,6 +2,10 @@ MYRIAD + + 1 + 2 + diff --git a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml index 03ac32f1320..badeba53504 100644 --- a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml @@ -3,6 +3,10 @@ CPU GPU + + 1 + 2 + diff --git a/tests/stress_tests/.automation/memcheck_tests/weekly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/weekly_configs/desktop_test_config.xml index ed189715b2a..5eb17bb1e93 100644 --- 
a/tests/stress_tests/.automation/memcheck_tests/weekly_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/memcheck_tests/weekly_configs/desktop_test_config.xml @@ -3,6 +3,10 @@ CPU GPU + + 1 + 2 + diff --git a/tests/stress_tests/.automation/unittests/nightly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/unittests/nightly_configs/desktop_test_config.xml index fc2ed5cba5c..175fd5a416b 100644 --- a/tests/stress_tests/.automation/unittests/nightly_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/unittests/nightly_configs/desktop_test_config.xml @@ -14,6 +14,10 @@ CPU GPU + + 1 + 2 + diff --git a/tests/stress_tests/.automation/unittests/nightly_configs/myriad_test_config.xml b/tests/stress_tests/.automation/unittests/nightly_configs/myriad_test_config.xml index b2f9a839233..b2a20ab16a8 100644 --- a/tests/stress_tests/.automation/unittests/nightly_configs/myriad_test_config.xml +++ b/tests/stress_tests/.automation/unittests/nightly_configs/myriad_test_config.xml @@ -13,6 +13,10 @@ MYRIAD + + 1 + 2 + diff --git a/tests/stress_tests/.automation/unittests/weekly_configs/desktop_test_config.xml b/tests/stress_tests/.automation/unittests/weekly_configs/desktop_test_config.xml index 6b3a2316070..d900df327aa 100644 --- a/tests/stress_tests/.automation/unittests/weekly_configs/desktop_test_config.xml +++ b/tests/stress_tests/.automation/unittests/weekly_configs/desktop_test_config.xml @@ -15,6 +15,10 @@ CPU GPU + + 1 + 2 + diff --git a/tests/stress_tests/common/ie_pipelines/pipelines.cpp b/tests/stress_tests/common/ie_pipelines/pipelines.cpp index 91749aef357..f7378de9535 100644 --- a/tests/stress_tests/common/ie_pipelines/pipelines.cpp +++ b/tests/stress_tests/common/ie_pipelines/pipelines.cpp @@ -10,162 +10,120 @@ #include #include +#include -using namespace InferenceEngine; -std::function load_unload_plugin(const std::string &target_device) { +std::function load_unload_plugin(const std::string &target_device, const int &api_version) { return [&] { - Core ie; - // GetVersions silently register plugin in `plugins` through `GetCPPPluginByName` - ie.GetVersions(target_device); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + // get_versions silently register plugin in `plugins` through `GetCPPPluginByName` + ie_api_wrapper->load_plugin(target_device); // Remove plugin for target_device from `plugins` - ie.UnregisterPlugin(target_device); + ie_api_wrapper->unload_plugin(target_device); }; } -std::function read_cnnnetwork(const std::string &model) { +std::function read_cnnnetwork(const std::string &model, const int &api_version) { return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); }; } -std::function cnnnetwork_reshape_batch_x2(const std::string &model) { +std::function cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version) { return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - const InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (const InputsDataMap::value_type& input : inputInfo) { - int batchIndex = -1; - auto layout = input.second->getTensorDesc().getLayout(); - if ((layout == Layout::NCHW) || (layout == Layout::NCDHW) || - (layout == Layout::NHWC) || (layout == Layout::NDHWC) || - (layout == Layout::NC)) { - batchIndex = 0; - } else if (layout == 
CN) { - batchIndex = 1; - } - if (batchIndex != -1) { - shapes[input.first][batchIndex] *= 2; - doReshape = true; - } - } - if (doReshape) - cnnNetwork.reshape(shapes); - else - throw std::logic_error("Reshape wasn't applied for a model."); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); + ie_api_wrapper->change_batch_size(2, iter); }; } -std::function set_input_params(const std::string &model) { +std::function set_input_params(const std::string &model, const int &api_version) { return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - for (auto &input : inputInfo) { - input.second->getPreProcess().setResizeAlgorithm(NO_RESIZE); - input.second->setPrecision(Precision::U8); - if (input.second->getInputData()->getTensorDesc().getDims().size() == 4) - input.second->setLayout(Layout::NCHW); - else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2) - input.second->setLayout(Layout::NC); - else - throw std::logic_error("Setting of input parameters wasn't applied for a model."); - } + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); + ie_api_wrapper->set_input_params(model); }; } -std::function create_exenetwork(const std::string &model, const std::string &target_device) { +std::function +create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version) { return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); }; } -std::function recreate_exenetwork(Core &ie, const std::string &model, const std::string &target_device) { +std::function recreate_compiled_model(std::shared_ptr &ie_wrapper, const std::string &model, + const std::string &target_device, const int &api_version) { return [&] { - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - }; -} - -std::function create_infer_request(const std::string &model, const std::string &target_device) { - return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); + ie_wrapper->load_plugin(target_device); + ie_wrapper->read_network(model); + ie_wrapper->load_network(target_device); }; } -std::function recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork) { +std::function +create_infer_request(const std::string &model, const std::string &target_device, const int &api_version) { return [&] { - InferRequest infer_request = exeNetwork.CreateInferRequest(); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); }; } -std::function infer_request_inference(const std::string &model, const std::string &target_device) { + +std::function recreate_infer_request(std::shared_ptr &ie_wrapper) { return [&] { - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = 
cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_wrapper->create_infer_request(); }; } -std::function reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) { + +std::function +infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version) { return [&] { - infer_request.Infer(); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); }; } -std::function inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams) { + +std::function reinfer_request_inference(std::shared_ptr &ie_wrapper) { return [&] { - std::map config; - config[target_device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams); + ie_wrapper->infer(); + }; +} - Core ie; - ie.GetVersions(target_device); - ie.SetConfig(config, target_device); - - InferRequest inferRequest; - - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); +std::function +inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams, + const int &api_version) { + return [&] { unsigned int nireq = nstreams; + auto ie_api_wrapper = create_infer_api_wrapper(api_version); + ie_api_wrapper->load_plugin(target_device); + ie_api_wrapper->set_config(target_device, "THROUGHPUT_STREAMS", nstreams); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); try { - nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); + nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); } catch (const std::exception &ex) { log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS"); } - for (int counter = 0; counter < nireq; counter++) { - inferRequest = exeNetwork.CreateInferRequest(); - fillBlobs(inferRequest, inputsInfo, batchSize); - inferRequest.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = inferRequest.GetBlob(output.first); + for (int counter = 0; counter < nireq; counter++) { + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + + ie_api_wrapper->infer(); } }; } diff --git a/tests/stress_tests/common/ie_pipelines/pipelines.h b/tests/stress_tests/common/ie_pipelines/pipelines.h index 349ec16a34f..22457132c60 100644 --- a/tests/stress_tests/common/ie_pipelines/pipelines.h +++ b/tests/stress_tests/common/ie_pipelines/pipelines.h @@ -5,15 +5,33 @@ #include #include #include +#include "../infer_api/infer_api.h" -std::function load_unload_plugin(const std::string &target_device); -std::function read_cnnnetwork(const std::string 
&model); -std::function cnnnetwork_reshape_batch_x2(const std::string &model); -std::function set_input_params(const std::string &model); -std::function create_exenetwork(const std::string &model, const std::string &target_device); -std::function recreate_exenetwork(InferenceEngine::Core &ie, const std::string &model, const std::string &target_device); -std::function create_infer_request(const std::string &model, const std::string &target_device); -std::function recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork); -std::function infer_request_inference(const std::string &model, const std::string &target_device); -std::function reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info); -std::function inference_with_streams(const std::string &model, const std::string &target_device, const int& nstreams); +std::function load_unload_plugin(const std::string &target_device, const int &api_version); + +std::function read_cnnnetwork(const std::string &model, const int &api_version); + +std::function cnnnetwork_reshape_batch_x2(const std::string &model, const int &iter, const int &api_version); + +std::function set_input_params(const std::string &model, const int &api_version); + +std::function +create_compiled_model(const std::string &model, const std::string &target_device, const int &api_version); + +std::function +create_infer_request(const std::string &model, const std::string &target_device, const int &api_version); + +std::function +infer_request_inference(const std::string &model, const std::string &target_device, const int &api_version); + +std::function +inference_with_streams(const std::string &model, const std::string &target_device, const int &nstreams, + const int &api_version); + +std::function +recreate_compiled_model(std::shared_ptr &ie, const std::string &model, const std::string &target_device, + const int &api_version); + +std::function recreate_infer_request(std::shared_ptr &ie_wrapper); + +std::function reinfer_request_inference(std::shared_ptr &ie_wrapper); diff --git a/tests/stress_tests/common/infer_api/infer_api.cpp b/tests/stress_tests/common/infer_api/infer_api.cpp new file mode 100644 index 00000000000..316a002bbed --- /dev/null +++ b/tests/stress_tests/common/infer_api/infer_api.cpp @@ -0,0 +1,188 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "infer_api.h" +#include "openvino/core/preprocess/pre_post_process.hpp" + + +InferAPI1::InferAPI1() = default; + +void InferAPI1::load_plugin(const std::string &device) { + ie.GetVersions(device); +} + +void InferAPI1::unload_plugin(const std::string &device) { + ie.UnregisterPlugin(device); +} + +void InferAPI1::read_network(const std::string &model) { + cnnNetwork = ie.ReadNetwork(model); + inputsInfo = cnnNetwork.getInputsInfo(); + InferenceEngine::ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); + for (const auto &input: inputsInfo) { + original_batch_size = shapes[input.first][0]; + + } + original_batch_size = original_batch_size ? original_batch_size : 1; +} + +void InferAPI1::load_network(const std::string &device) { + exeNetwork = ie.LoadNetwork(cnnNetwork, device); +} + +void InferAPI1::create_infer_request() { + inferRequest = exeNetwork.CreateInferRequest(); +} + +void InferAPI1::prepare_input() { + auto batchSize = cnnNetwork.getBatchSize(); + batchSize = batchSize != 0 ? 
batchSize : 1; + fillBlobs(inferRequest, exeNetwork.GetInputsInfo(), batchSize); +} + +void InferAPI1::infer() { + inferRequest.Infer(); + for (auto &output: outputInfo) { + InferenceEngine::Blob::Ptr outputBlob = inferRequest.GetBlob(output.first); + } +} + +void InferAPI1::change_batch_size(int multiplier, int cur_iter) { + bool doReshape = false; + auto shapes = cnnNetwork.getInputShapes(); + int new_batch_size = ((cur_iter % 2) == 0) ? original_batch_size * multiplier : original_batch_size; + for (const auto &input: inputsInfo) { + int batchIndex = -1; + auto layout = input.second->getTensorDesc().getLayout(); + if ((layout == InferenceEngine::Layout::NCHW) || (layout == InferenceEngine::Layout::NCDHW) || + (layout == InferenceEngine::Layout::NHWC) || (layout == InferenceEngine::Layout::NDHWC) || + (layout == InferenceEngine::Layout::NC)) { + batchIndex = 0; + } else if (layout == InferenceEngine::CN) { + batchIndex = 1; + } + if (batchIndex != -1) { + shapes[input.first][batchIndex] = new_batch_size; + doReshape = true; + } + } + if (doReshape) + cnnNetwork.reshape(shapes); + else + throw std::logic_error("Reshape wasn't applied for a model."); +} + +void InferAPI1::set_input_params(const std::string &model) { + cnnNetwork = ie.ReadNetwork(model); + InferenceEngine::InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); + for (auto &input: inputInfo) { + input.second->getPreProcess().setResizeAlgorithm(InferenceEngine::NO_RESIZE); + input.second->setPrecision(InferenceEngine::Precision::U8); + if (input.second->getInputData()->getTensorDesc().getDims().size() == 4) + input.second->setLayout(InferenceEngine::Layout::NCHW); + else if (input.second->getInputData()->getTensorDesc().getDims().size() == 2) + input.second->setLayout(InferenceEngine::Layout::NC); + else + throw std::logic_error("Setting of input parameters wasn't applied for a model."); + } +} + +void InferAPI1::set_config(const std::string &device, const std::string &property, int nstreams) { + config[device + "_" + property] = std::to_string(nstreams); + ie.SetConfig(config, device); +} + +unsigned int InferAPI1::get_property(const std::string &name) { + return exeNetwork.GetMetric(name).as(); +} + + +InferAPI2::InferAPI2() = default; + +void InferAPI2::load_plugin(const std::string &device) { + ie.get_versions(device); +} + +void InferAPI2::unload_plugin(const std::string &device) { + ie.unload_plugin(device); +} + +void InferAPI2::read_network(const std::string &model) { + network = ie.read_model(model); + inputs = network->inputs(); + + for (const auto &input: inputs) { + auto tensor_shape = input.get_shape(); + original_batch_size = tensor_shape[0]; + original_batch_size = original_batch_size ? original_batch_size : 1; + } +} + +void InferAPI2::load_network(const std::string &device) { + compiled_model = ie.compile_model(network, device); +} + +void InferAPI2::create_infer_request() { + infer_request = compiled_model.create_infer_request(); +} + +void InferAPI2::prepare_input() { + fillTensors(infer_request, inputs); +} + +void InferAPI2::infer() { + infer_request.infer(); + for (size_t i = 0; i < outputs.size(); ++i) { + const auto &output_tensor = infer_request.get_output_tensor(i); + } +} + +void InferAPI2::change_batch_size(int multiplier, int cur_iter) { + int new_batch_size = ((cur_iter % 2) == 0) ? 
original_batch_size * multiplier : original_batch_size; + for (auto &input: inputs) { + auto tensor_shape = input.get_shape(); + tensor_shape[0] = new_batch_size; + network->reshape({{input.get_any_name(), tensor_shape}}); + } +} + +void InferAPI2::set_config(const std::string &device, const std::string &property, int nstreams) { + config[device + "_" + property] = std::to_string(nstreams); + ie.set_property(device, config); +} + +unsigned int InferAPI2::get_property(const std::string &name) { + return compiled_model.get_property(name).as(); +} + +void InferAPI2::set_input_params(const std::string &model) { + network = ie.read_model(model); + inputs = network->inputs(); + auto ppp = ov::preprocess::PrePostProcessor(network); + for (size_t i = 0; i < inputs.size(); ++i) { + auto &input_info = ppp.input(i); + if (inputs[i].get_shape().size() == 4) { + input_info.tensor().set_element_type(ov::element::u8).set_layout("NCHW"); + input_info.model().set_layout("NCHW"); + ppp.input(i).preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); + } else if (inputs[i].get_shape().size() == 2) { + input_info.tensor().set_element_type(ov::element::u8).set_layout("NC"); + input_info.model().set_layout("NC"); + } else { + throw std::logic_error("Setting of input parameters wasn't applied for a model."); + } + } + network = ppp.build(); + inputs = network->inputs(); +} + +std::shared_ptr create_infer_api_wrapper(const int &api_version) { + if (api_version == 1) { + return std::make_shared(InferAPI1()); + } else if (api_version == 2) { + return std::make_shared(InferAPI2()); + } else { + throw std::logic_error("Unsupported API version"); + } +} diff --git a/tests/stress_tests/common/infer_api/infer_api.h b/tests/stress_tests/common/infer_api/infer_api.h new file mode 100644 index 00000000000..94ae2360df4 --- /dev/null +++ b/tests/stress_tests/common/infer_api/infer_api.h @@ -0,0 +1,111 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "openvino/runtime/core.hpp" +#include "common_utils.h" + + +class InferApiBase { +public: + virtual void load_plugin(const std::string &device) = 0; + + virtual void unload_plugin(const std::string &device) = 0; + + virtual void read_network(const std::string &model) = 0; + + virtual void load_network(const std::string &device) = 0; + + virtual void create_infer_request() = 0; + + virtual void infer() = 0; + + virtual void prepare_input() = 0; + + virtual void change_batch_size(int multiplier, int cur_iter) = 0; + + virtual void set_input_params(const std::string &model) = 0; + + virtual void set_config(const std::string &device, const std::string &property, int nstreams) = 0; + + virtual unsigned int get_property(const std::string &name) = 0; +}; + +class InferAPI1 : public InferApiBase { +public: + InferAPI1(); + + void load_plugin(const std::string &device) override; + + void unload_plugin(const std::string &device) override; + + void read_network(const std::string &model) override; + + void load_network(const std::string &device) override; + + void create_infer_request() override; + + void prepare_input() override; + + void infer() override; + + void change_batch_size(int multiplier, int cur_iter) override; + + void set_input_params(const std::string &model) override; + + void set_config(const std::string &device, const std::string &property, int nstreams) override; + + unsigned int get_property(const std::string &name) override; + +private: + InferenceEngine::Core ie; + 
InferenceEngine::CNNNetwork cnnNetwork; + InferenceEngine::ExecutableNetwork exeNetwork; + InferenceEngine::InferRequest inferRequest; + InferenceEngine::InputsDataMap inputsInfo; + InferenceEngine::OutputsDataMap outputInfo; + int original_batch_size; + std::map config; +}; + +class InferAPI2 : public InferApiBase { +public: + InferAPI2(); + + void load_plugin(const std::string &device) override; + + void unload_plugin(const std::string &device) override; + + void read_network(const std::string &model) override; + + void load_network(const std::string &device) override; + + void create_infer_request() override; + + void prepare_input() override; + + void infer() override; + + void change_batch_size(int multiplier, int cur_iter) override; + + void set_input_params(const std::string &model) override; + + void set_config(const std::string &device, const std::string &property, int nstreams) override; + + unsigned int get_property(const std::string &name) override; + +private: + ov::Core ie; + std::shared_ptr network; + ov::CompiledModel compiled_model; + ov::InferRequest infer_request; + std::vector> inputs; + std::vector> outputs; + int original_batch_size; + std::map config; +}; + +std::shared_ptr create_infer_api_wrapper(const int &api_version); diff --git a/tests/stress_tests/common/tests_utils.cpp b/tests/stress_tests/common/tests_utils.cpp index 7de24b07a60..68e0d5f5ff8 100644 --- a/tests/stress_tests/common/tests_utils.cpp +++ b/tests/stress_tests/common/tests_utils.cpp @@ -19,28 +19,32 @@ std::vector generateTestsParams(std::initializer_list fie std::vector tests_cases; const pugi::xml_document &test_config = Environment::Instance().getTestConfig(); - std::vector processes, threads, iterations; + std::vector processes, threads, iterations, api_versions; std::vector devices, models, models_names, precisions; pugi::xml_node values; - for (auto field = fields.begin(); field != fields.end(); field++) { - if (*field == "processes") { + for (const auto &field: fields) { + if (field == "processes") { values = test_config.child("attributes").child("processes"); for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) processes.push_back(val.text().as_int()); - } else if (*field == "threads") { + } else if (field == "threads") { values = test_config.child("attributes").child("threads"); for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) threads.push_back(val.text().as_int()); - } else if (*field == "iterations") { + } else if (field == "iterations") { values = test_config.child("attributes").child("iterations"); for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) iterations.push_back(val.text().as_int()); - } else if (*field == "devices") { + } else if (field == "devices") { values = test_config.child("attributes").child("devices"); for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) - devices.push_back(val.text().as_string()); - } else if (*field == "models") { + devices.emplace_back(val.text().as_string()); + } else if (field == "api_versions") { + values = test_config.child("attributes").child("api_versions"); + for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) + api_versions.push_back(val.text().as_int()); + } else if (field == "models") { values = test_config.child("attributes").child("models"); for (pugi::xml_node val = values.first_child(); val; val = val.next_sibling()) { std::string full_path = val.attribute("full_path").as_string(); @@ -62,18 +66,20 @@ 
std::vector generateTestsParams(std::initializer_list fie processes = !processes.empty() ? processes : std::vector{1}; threads = !threads.empty() ? threads : std::vector{1}; iterations = !iterations.empty() ? iterations : std::vector{1}; + api_versions = !api_versions.empty() ? api_versions : std::vector{1, 2}; devices = !devices.empty() ? devices : std::vector{"NULL"}; models = !models.empty() ? models : std::vector{"NULL"}; precisions = !precisions.empty() ? precisions : std::vector{"NULL"}; models_names = !models_names.empty() ? models_names : std::vector{"NULL"}; - for (auto &numprocesses : processes) - for (auto &numthreads : threads) - for (auto &numiters : iterations) - for (auto &device : devices) - for (int i = 0; i < models.size(); i++) - tests_cases.push_back(TestCase(numprocesses, numthreads, numiters, device, models[i], - models_names[i], precisions[i])); + for (auto &numprocesses: processes) + for (auto &numthreads: threads) + for (auto &numiters: iterations) + for (auto &api_version: api_versions) + for (auto &device: devices) + for (int i = 0; i < models.size(); i++) + tests_cases.emplace_back(numprocesses, numthreads, numiters, api_version, device, models[i], + models_names[i], precisions[i]); return tests_cases; } @@ -93,6 +99,7 @@ std::vector generateTestsParamsMemLeaks() { numprocesses = device.attribute("processes").as_int(1); numthreads = device.attribute("threads").as_int(1); numiterations = device.attribute("iterations").as_int(1); + std::vector api_versions{1, 2}; std::vector> models; @@ -104,13 +111,15 @@ std::vector generateTestsParamsMemLeaks() { "One of the 'model' records from test config doesn't contain 'full_path' or 'path' attributes"); std::string name = model.attribute("name").as_string(); std::string precision = model.attribute("precision").as_string(); - std::map model_map{{"name", name}, - {"path", path}, + std::map model_map{{"name", name}, + {"path", path}, {"full_path", full_path}, {"precision", precision}}; models.push_back(model_map); } - tests_cases.push_back(MemLeaksTestCase(numprocesses, numthreads, numiterations, device_name, models)); + for (auto api_version: api_versions) { + tests_cases.emplace_back(numprocesses, numthreads, numiterations, api_version, device_name, models); + } } return tests_cases; @@ -124,15 +133,16 @@ std::string getTestCaseNameMemLeaks(const testing::TestParamInfo &tests_pipeline, const TestCase ¶ms) { - tests_pipeline(params.model, params.device, params.numiters); +void test_wrapper(const std::function &tests_pipeline, + const TestCase ¶ms) { + tests_pipeline(params.model, params.device, params.numiters, params.api_version); } -void _runTest(const std::function &tests_pipeline, const TestCase ¶ms) { +void _runTest(const std::function &tests_pipeline, const TestCase ¶ms) { run_in_threads(params.numthreads, test_wrapper, tests_pipeline, params); } -void runTest(const std::function &tests_pipeline, const TestCase ¶ms) { +void runTest(const std::function &tests_pipeline, const TestCase ¶ms) { #if DEBUG_MODE tests_pipeline(params.model, params.device, params.numiters); #else diff --git a/tests/stress_tests/common/tests_utils.h b/tests/stress_tests/common/tests_utils.h index ce683b18336..bb362d1a42f 100644 --- a/tests/stress_tests/common/tests_utils.h +++ b/tests/stress_tests/common/tests_utils.h @@ -13,7 +13,9 @@ #include -enum TestStatus { TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK }; +enum TestStatus { + TEST_NOT_STARTED = 0, TEST_FAILED, TEST_OK +}; using TestResult = std::pair; @@ -22,6 +24,7 @@ public: int 
numprocesses; int numthreads; int numiters; + int api_version; std::string precision; std::string test_case_name; std::string model_name; @@ -29,10 +32,10 @@ public: protected: // Replace non-alphabetic/numeric symbols with "_" to prevent logging errors - std::string update_item_for_name(const std::string &item) { + static std::string update_item_for_name(const std::string &item) { std::string _item(item); - for (std::string::size_type index = 0; index < _item.size(); ++index) { - if (!isalnum(_item[index]) && _item[index] != '_') _item[index] = '_'; + for (char &index: _item) { + if (!isalnum(index) && index != '_') index = '_'; } return _item; } @@ -42,13 +45,15 @@ class TestCase : public TestCaseBase { public: std::string model; - TestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device, const std::string &_model, + TestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device, + const std::string &_model, const std::string &_model_name, const std::string &_precision) { - numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device, model = _model, - model_name = _model_name, precision = _precision; + numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version, + device = _device, model = _model, model_name = _model_name, precision = _precision; test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) + "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) + - "_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name); + "_Precision_" + update_item_for_name(precision) + "_Model_" + update_item_for_name(model_name) + + "_API_" + std::to_string(api_version); } }; @@ -56,12 +61,13 @@ class MemLeaksTestCase : public TestCaseBase { public: std::vector> models; - MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, std::string _device, + MemLeaksTestCase(int _numprocesses, int _numthreads, int _numiters, int _api_version, std::string _device, std::vector> _models) { - numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, device = _device, - models = _models; + numprocesses = _numprocesses, numthreads = _numthreads, numiters = _numiters, api_version = _api_version, + device = _device, models = _models; test_case_name = "Numprocesses_" + std::to_string(numprocesses) + "_Numthreads_" + std::to_string(numthreads) + - "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device); + "_Numiters_" + std::to_string(numiters) + "_Device_" + update_item_for_name(device) + "_API_" + + std::to_string(api_version); for (int i = 0; i < models.size(); i++) { test_case_name += "_Model" + std::to_string(i + 1) + "_" + update_item_for_name(models[i]["name"]) + "_" + update_item_for_name(models[i]["precision"]); @@ -76,7 +82,9 @@ private: bool _collect_results_only = false; Environment() = default; + Environment(const Environment &) = delete; + Environment &operator=(const Environment &) = delete; public: @@ -86,14 +94,21 @@ public: } const pugi::xml_document &getTestConfig(); + void setTestConfig(const pugi::xml_document &test_config); }; std::vector generateTestsParams(std::initializer_list items); + std::vector generateTestsParamsMemLeaks(); + std::string getTestCaseName(const testing::TestParamInfo &obj); + std::string getTestCaseNameMemLeaks(const testing::TestParamInfo &obj); -void 
runTest(const std::function &tests_pipeline, const TestCase ¶ms); -void _runTest(const std::function &tests_pipeline, const TestCase ¶ms); -void test_wrapper(const std::function &tests_pipeline, const TestCase ¶ms); +void runTest(const std::function &tests_pipeline, const TestCase ¶ms); + +void _runTest(const std::function &tests_pipeline, const TestCase ¶ms); + +void test_wrapper(const std::function &tests_pipeline, + const TestCase ¶ms); diff --git a/tests/stress_tests/memcheck_tests/tests.cpp b/tests/stress_tests/memcheck_tests/tests.cpp index 9d1bd361afd..7a5da603e5e 100644 --- a/tests/stress_tests/memcheck_tests/tests.cpp +++ b/tests/stress_tests/memcheck_tests/tests.cpp @@ -4,6 +4,7 @@ #include "tests_utils.h" #include "../common/tests_utils.h" +#include "../common/infer_api/infer_api.h" #include "common_utils.h" #include "../common/managers/thread_manager.h" #include "tests_pipelines/tests_pipelines.h" @@ -11,8 +12,7 @@ #include #include - -using namespace InferenceEngine; +#include class MemCheckTestSuite : public ::testing::TestWithParam { @@ -21,11 +21,10 @@ public: TestReferences test_refs; void SetUp() override { - const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + const ::testing::TestInfo *const test_info = ::testing::UnitTest::GetInstance()->current_test_info(); test_name = std::string(test_info->name()).substr(0, std::string(test_info->name()).find('/')); - //const std::string full_test_name = std::string(test_info->test_case_name()) + "." + std::string(test_info->name()); - const auto& test_params = GetParam(); + const auto &test_params = GetParam(); model = test_params.model; model_name = test_params.model_name; device = test_params.device; @@ -33,13 +32,13 @@ public: test_refs.collect_vm_values_for_test(test_name, test_params); EXPECT_GT(test_refs.references[VMSIZE], 0) << "Reference value of VmSize is less than 0. Value: " - << test_refs.references[VMSIZE]; + << test_refs.references[VMSIZE]; EXPECT_GT(test_refs.references[VMPEAK], 0) << "Reference value of VmPeak is less than 0. Value: " - << test_refs.references[VMPEAK]; + << test_refs.references[VMPEAK]; EXPECT_GT(test_refs.references[VMRSS], 0) << "Reference value of VmRSS is less than 0. Value: " - << test_refs.references[VMRSS]; + << test_refs.references[VMRSS]; EXPECT_GT(test_refs.references[VMHWM], 0) << "Reference value of VmHWM is less than 0. 
Value: " - << test_refs.references[VMHWM]; + << test_refs.references[VMHWM]; } }; @@ -48,17 +47,15 @@ TEST_P(MemCheckTestSuite, create_exenetwork) { log_info("Create ExecutableNetwork from network: \"" << model << "\" with precision: \"" << precision << "\" for device: \"" << device << "\""); - auto test_pipeline = [&]{ - MemCheckPipeline memCheckPipeline; - - Core ie; - ie.GetVersions(device); - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device); - + auto test_params = GetParam(); + MemCheckPipeline memCheckPipeline; + auto test_pipeline = [&] { + auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version); + ie_api_wrapper->load_plugin(device); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(device); log_info("Memory consumption after LoadNetwork:"); memCheckPipeline.record_measures(test_name); - log_debug(memCheckPipeline.get_reference_record_for_test(test_name, model_name, precision, device)); return memCheckPipeline.measure(); }; @@ -71,25 +68,16 @@ TEST_P(MemCheckTestSuite, infer_request_inference) { log_info("Inference of InferRequest from network: \"" << model << "\" with precision: \"" << precision << "\" for device: \"" << device << "\""); - auto test_pipeline = [&]{ - MemCheckPipeline memCheckPipeline; - - Core ie; - ie.GetVersions(device); - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device); - InferRequest inferRequest = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(inferRequest, inputsInfo, batchSize); - - inferRequest.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = inferRequest.GetBlob(output.first); - + auto test_params = GetParam(); + MemCheckPipeline memCheckPipeline; + auto test_pipeline = [&] { + auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version); + ie_api_wrapper->load_plugin(device); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); log_info("Memory consumption after Inference:"); memCheckPipeline.record_measures(test_name); @@ -103,11 +91,12 @@ TEST_P(MemCheckTestSuite, infer_request_inference) { // tests_pipelines/tests_pipelines.cpp INSTANTIATE_TEST_SUITE_P(MemCheckTests, MemCheckTestSuite, - ::testing::ValuesIn( - generateTestsParams({"devices", "models"})), - getTestCaseName); + ::testing::ValuesIn( + generateTestsParams({"devices", "models"})), + getTestCaseName); TEST_P(MemCheckTestSuite, inference_with_streams) { + auto test_params = GetParam(); const auto nstreams = 2; log_info("Inference of InferRequest from network: \"" << model << "\" with precision: \"" << precision @@ -118,37 +107,23 @@ TEST_P(MemCheckTestSuite, inference_with_streams) { auto test_pipeline = [&] { MemCheckPipeline memCheckPipeline; - - std::map config; - const std::string key = device + "_THROUGHPUT_STREAMS"; - config[device + "_THROUGHPUT_STREAMS"] = std::to_string(nstreams); - - Core ie; - ie.GetVersions(device); - ie.SetConfig(config, device); - - InferRequest inferRequest; - - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device); - auto batchSize = cnnNetwork.getBatchSize(); - 
batchSize = batchSize != 0 ? batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - unsigned int nireq = nstreams; + auto ie_api_wrapper = create_infer_api_wrapper(test_params.api_version); + ie_api_wrapper->load_plugin(device); + ie_api_wrapper->set_config(device, "THROUGHPUT_STREAMS", nstreams); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(device); try { - nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as(); + nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); } catch (const std::exception &ex) { log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS"); } - for (int counter = 0; counter < nireq; counter++) { - inferRequest = exeNetwork.CreateInferRequest(); - fillBlobs(inferRequest, inputsInfo, batchSize); - inferRequest.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = inferRequest.GetBlob(output.first); + for (int counter = 0; counter < nireq; counter++) { + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + + ie_api_wrapper->infer(); } log_info("Memory consumption after Inference with streams: \"" << nstreams diff --git a/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.cpp b/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.cpp index b18c78eec8e..f95f46fd6bd 100644 --- a/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.cpp +++ b/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.cpp @@ -39,7 +39,7 @@ std::array MemCheckPipeline::measure() { return measures; } -void MemCheckPipeline::record_measures(const std::string & id) { +void MemCheckPipeline::record_measures(const std::string &id) { std::array measures = measure(); log_debug("[ MEASURE ] " << MEMCHECK_DELIMITER << id); log_info(util::get_measure_values_headers(MEMCHECK_DELIMITER)); @@ -62,7 +62,7 @@ std::string MemCheckPipeline::get_reference_record_for_test(std::string test_nam return ss.str(); } -TestResult common_test_pipeline(const std::function()>& test_pipeline, +TestResult common_test_pipeline(const std::function()> &test_pipeline, const std::array &references) { log_info("Reference values of virtual memory consumption:"); log_info(util::get_measure_values_headers(MEMCHECK_DELIMITER)); diff --git a/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.h b/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.h index 41e18ce5b2a..5d49eb35c15 100644 --- a/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.h +++ b/tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.h @@ -28,6 +28,7 @@ private: * @brief Measures values at the current point of time */ std::array _measure(); + public: /** * @brief Constructs MemCheckPipeline object and @@ -45,7 +46,7 @@ public: * @brief Measures values and records aligned measurements using provided identifier * provided identifier */ - void record_measures(const std::string & id); + void record_measures(const std::string &id); /** * @brief Prepares string used for fast generation of file with references @@ -54,6 +55,6 @@ public: std::string precision, std::string target_device); }; -TestResult common_test_pipeline(const std::function()>& test_pipeline, +TestResult common_test_pipeline(const std::function()> &test_pipeline, const std::array &references); // tests_pipelines/tests_pipelines.cpp diff --git a/tests/stress_tests/memcheck_tests/tests_utils.h 
b/tests/stress_tests/memcheck_tests/tests_utils.h index 2489c4e8359..fe2e3244955 100644 --- a/tests/stress_tests/memcheck_tests/tests_utils.h +++ b/tests/stress_tests/memcheck_tests/tests_utils.h @@ -10,21 +10,23 @@ #include // Measure values -enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax }; +enum MeasureValue { + VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax +}; // Measure values headers -const std::array MeasureValueHeader { "VMRSS", "VMHWM", "VMSIZE", "VMPEAK", "THREADS" }; +const std::array MeasureValueHeader{"VMRSS", "VMHWM", "VMSIZE", "VMPEAK", "THREADS"}; namespace util { - template - static std::string get_measure_values_as_str(const std::array & array, - const std::string & delimiter = "\t\t") { + template + static std::string get_measure_values_as_str(const std::array &array, + const std::string &delimiter = "\t\t") { std::string str = std::to_string(*array.begin()); for (auto it = array.begin() + 1; it != array.end(); it++) str += delimiter + std::to_string(*it); return str; } - static std::string get_measure_values_headers(const std::string & delimiter = "\t\t") { + static std::string get_measure_values_headers(const std::string &delimiter = "\t\t") { std::string str = *MeasureValueHeader.begin(); for (auto it = MeasureValueHeader.begin() + 1; it != MeasureValueHeader.end(); it++) str += delimiter + *it; @@ -35,16 +37,20 @@ namespace util { class MemCheckEnvironment { private: pugi::xml_document _refs_config; + MemCheckEnvironment() = default; - MemCheckEnvironment(const MemCheckEnvironment&) = delete; - MemCheckEnvironment& operator=(const MemCheckEnvironment&) = delete; + + MemCheckEnvironment(const MemCheckEnvironment &) = delete; + + MemCheckEnvironment &operator=(const MemCheckEnvironment &) = delete; + public: - static MemCheckEnvironment& Instance(){ + static MemCheckEnvironment &Instance() { static MemCheckEnvironment env; return env; } - const pugi::xml_document & getRefsConfig() { + const pugi::xml_document &getRefsConfig() { return _refs_config; } @@ -60,7 +66,7 @@ private: public: std::array references; - TestReferences () { + TestReferences() { std::fill(references.begin(), references.end(), -1); // Parse RefsConfig from MemCheckEnvironment diff --git a/tests/stress_tests/memleaks_tests/tests.cpp b/tests/stress_tests/memleaks_tests/tests.cpp index 8e5b13aca21..bd111a33e4d 100644 --- a/tests/stress_tests/memleaks_tests/tests.cpp +++ b/tests/stress_tests/memleaks_tests/tests.cpp @@ -3,21 +3,22 @@ // #include "../common/managers/thread_manager.h" -#include "../common/tests_utils.h" -#include "common_utils.h" +#include "../common/infer_api/infer_api.h" #include "tests_pipelines/tests_pipelines.h" -#include #include using namespace InferenceEngine; -class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam {}; +class MemLeaksTestSuiteNoModel : public ::testing::TestWithParam { +}; -class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam {}; +class MemLeaksTestSuiteNoDevice : public ::testing::TestWithParam { +}; -class MemLeaksTestSuite : public ::testing::TestWithParam {}; +class MemLeaksTestSuite : public ::testing::TestWithParam { +}; inline void test_runner(int numthreads, const std::function &test_function) { ThreadManager thr_manager; @@ -29,7 +30,7 @@ inline void test_runner(int numthreads, const std::function &test_ for (int i = 0; i < numthreads; i++) { EXPECT_EQ(statuses[i], ManagerStatus::FINISHED_SUCCESSFULLY) - << "[Thread " << i << "] Thread not finished successfully"; + << "[Thread " << i << 
"] Thread not finished successfully"; EXPECT_EQ(results[i].first, TestStatus::TEST_OK) << "[Thread " << i << "] " << results[i].second; } } @@ -38,7 +39,7 @@ inline void test_runner(int numthreads, const std::function &test_ TEST_P(MemLeaksTestSuiteNoModel, load_unload_plugin) { auto test_params = GetParam(); - std::vector> pipeline = {load_unload_plugin(test_params.device)}; + std::vector> pipeline = {load_unload_plugin(test_params.device, test_params.api_version)}; auto test = [&] { log_info("Load/unload plugin for \"" << test_params.device << "\" device" << " for " << test_params.numiters << " times"); @@ -51,8 +52,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, read_network) { auto test_params = GetParam(); std::vector> pipeline; + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"])); + pipeline.push_back(read_cnnnetwork(test_params.models[i]["full_path"], test_params.api_version)); } auto test = [&] { log_info("Read networks: " << test_params.model_name << " for " << test_params.numiters << " times"); @@ -65,8 +67,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, cnnnetwork_reshape_batch_x2) { auto test_params = GetParam(); std::vector> pipeline; + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"])); + pipeline.push_back(cnnnetwork_reshape_batch_x2(test_params.models[i]["full_path"], i, test_params.api_version)); } auto test = [&] { log_info("Reshape to batch*=2 of CNNNetworks created from networks: " << test_params.model_name << " for " @@ -80,8 +83,9 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) { auto test_params = GetParam(); std::vector> pipeline; + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(set_input_params(test_params.models[i]["full_path"])); + pipeline.push_back(set_input_params(test_params.models[i]["full_path"], test_params.api_version)); } auto test = [&] { log_info("Apply preprocessing for CNNNetworks from networks: " << test_params.model_name << " for " @@ -93,16 +97,18 @@ TEST_P(MemLeaksTestSuiteNoDevice, set_input_params) { TEST_P(MemLeaksTestSuite, recreate_exenetwork) { auto test_params = GetParam(); - Core ie; std::vector> pipeline; + auto ie_wrapper = create_infer_api_wrapper(test_params.api_version); + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(recreate_exenetwork(ie, test_params.models[i]["full_path"], test_params.device)); + pipeline.push_back(recreate_compiled_model(ie_wrapper, test_params.models[i]["full_path"], test_params.device, + test_params.api_version)); } auto test = [&] { log_info("Recreate ExecutableNetworks within existing InferenceEngine::Core from networks: " - << test_params.model_name << " for \"" << test_params.device << "\" device for " - << test_params.numiters << " times"); + << test_params.model_name << " for \"" << test_params.device << "\" device for " + << test_params.numiters << " times"); return common_test_pipeline(pipeline, test_params.numiters); }; test_runner(test_params.numthreads, test); @@ -110,19 +116,17 @@ TEST_P(MemLeaksTestSuite, recreate_exenetwork) { TEST_P(MemLeaksTestSuite, recreate_infer_request) { auto test_params = GetParam(); - Core ie; std::vector> pipeline; - std::vector exeNetworks; + auto ie_wrapper = 
create_infer_api_wrapper(test_params.api_version); - int n_models = test_params.models.size(); - exeNetworks.reserve(n_models); + size_t n_models = test_params.models.size(); for (int i = 0; i < n_models; i++) { - CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device); - exeNetworks.push_back(exeNetwork); - pipeline.push_back(recreate_infer_request(exeNetworks[i])); + ie_wrapper->read_network(test_params.models[i]["full_path"]); + ie_wrapper->load_network(test_params.device); + pipeline.push_back(recreate_infer_request(ie_wrapper)); } + auto test = [&] { log_info("Create InferRequests from networks: " << test_params.model_name << " for \"" << test_params.device << "\" device for " << test_params.numiters << " times"); @@ -133,28 +137,18 @@ TEST_P(MemLeaksTestSuite, recreate_infer_request) { TEST_P(MemLeaksTestSuite, reinfer_request_inference) { auto test_params = GetParam(); - Core ie; std::vector> pipeline; - std::vector infer_requests; - std::vector outputs_info; - - int n_models = test_params.models.size(); - infer_requests.reserve(n_models); - outputs_info.reserve(n_models); + auto ie_wrapper = create_infer_api_wrapper(test_params.api_version); + size_t n_models = test_params.models.size(); for (int i = 0; i < n_models; i++) { - CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.models[i]["full_path"]); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - infer_requests.push_back(infer_request); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - outputs_info.push_back(output_info); - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? 
batchSize : 1; - const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_requests[i], inputsInfo, batchSize); - pipeline.push_back(reinfer_request_inference(infer_requests[i], outputs_info[i])); + ie_wrapper->read_network(test_params.models[i]["full_path"]); + ie_wrapper->load_network(test_params.device); + ie_wrapper->create_infer_request(); + ie_wrapper->prepare_input(); + pipeline.push_back(reinfer_request_inference(ie_wrapper)); } + auto test = [&] { log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \"" << test_params.device << "\" device for " @@ -167,8 +161,10 @@ TEST_P(MemLeaksTestSuite, reinfer_request_inference) { TEST_P(MemLeaksTestSuite, infer_request_inference) { auto test_params = GetParam(); std::vector> pipeline; + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device)); + pipeline.push_back(infer_request_inference(test_params.models[i]["full_path"], test_params.device, + test_params.api_version)); } auto test = [&] { log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \"" @@ -183,8 +179,10 @@ TEST_P(MemLeaksTestSuite, inference_with_streams) { auto test_params = GetParam(); const auto nstreams = 2; std::vector> pipeline; + pipeline.reserve(test_params.models.size()); for (int i = 0; i < test_params.models.size(); i++) { - pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams)); + pipeline.push_back(inference_with_streams(test_params.models[i]["full_path"], test_params.device, nstreams, + test_params.api_version)); } auto test = [&] { log_info("Inference of InferRequests from networks: " << test_params.model_name << " for \"" diff --git a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp index cc27ceec880..83ce05386ec 100644 --- a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp +++ b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp @@ -28,7 +28,9 @@ using namespace InferenceEngine; #define THRESHOLD 0.1 // Measure values -enum MeasureValue { VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax }; +enum MeasureValue { + VMRSS = 0, VMHWM, VMSIZE, VMPEAK, THREADS, MeasureValueMax +}; namespace util { template @@ -42,7 +44,7 @@ namespace util { } }// namespace util -TestResult common_test_pipeline(std::vector> test_pipeline, const int &n) { +TestResult common_test_pipeline(const std::vector> &test_pipeline, const int &n) { if (AVERAGE_NUM > n) return TestResult(TestStatus::TEST_FAILED, "Test failed: number of iterations less than defined AVERAGE_NUM"); @@ -65,7 +67,7 @@ TestResult common_test_pipeline(std::vector> test_pipeline for (size_t iteration = 1, measure_count = n / AVERAGE_NUM;; iteration++) { // run test pipeline and collect metrics - for (auto step : test_pipeline) step(); + for (const auto &step: test_pipeline) step(); getVmValues(cur[VMSIZE], cur[VMPEAK], cur[VMRSS], cur[VMHWM]); cur[THREADS] = getThreadsNum(); @@ -103,8 +105,8 @@ TestResult common_test_pipeline(std::vector> test_pipeline // threshold = THRESHOLD * ref util::transform(ref, threshold, [](long ref_val) -> float { return THRESHOLD * ref_val; }); log_info("Setting thresholds:" - << " VMRSS=" << ref[VMRSS] << "(+-" << static_cast(threshold[VMRSS]) << 
")," - << " VMHWM=" << ref[VMHWM] << "(+-" << static_cast(threshold[VMHWM]) << ")"); + << " VMRSS=" << ref[VMRSS] << "(+-" << static_cast(threshold[VMRSS]) << ")," + << " VMHWM=" << ref[VMHWM] << "(+-" << static_cast(threshold[VMHWM]) << ")"); } else if (measure_count <= 0) { // exit from main loop break; diff --git a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h index 0c6f182db34..90aed99fa24 100644 --- a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h +++ b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h @@ -13,5 +13,5 @@ #include // tests_pipelines/tests_pipelines.cpp -TestResult common_test_pipeline(std::vector> test_pipeline, const int &n); +TestResult common_test_pipeline(const std::vector> &test_pipeline, const int &n); // tests_pipelines/tests_pipelines.cpp diff --git a/tests/stress_tests/scripts/run_memcheck.py b/tests/stress_tests/scripts/run_memcheck.py index 575d1232ff3..aa74a177045 100755 --- a/tests/stress_tests/scripts/run_memcheck.py +++ b/tests/stress_tests/scripts/run_memcheck.py @@ -129,6 +129,9 @@ def main(): parser.add_argument('--comparison_report', required=args.compare, help='create comparison report file name') + parser.add_argument('--gtest_filter', + default='', + help='gtest filter passed to gtest') args = parser.parse_args() @@ -150,6 +153,7 @@ def main(): '--output_dir', f'{args.output_dir}', '--workers', f'{args.workers}', '--timeout', f'{args.timeout}', + '--gtest_filter', f'{args.gtest_filter}', args.binary, '--'] + binary_args) if args.upload or args.timeline_report or args.compare: diff --git a/tests/stress_tests/unittests/tests.cpp b/tests/stress_tests/unittests/tests.cpp index c4d79d65e9f..a12d10fe2a3 100644 --- a/tests/stress_tests/unittests/tests.cpp +++ b/tests/stress_tests/unittests/tests.cpp @@ -33,8 +33,8 @@ TEST_P(UnitTestSuiteNoDevice, set_input_params) { runTest(test_set_input_params, GetParam()); } -TEST_P(UnitTestSuite, create_exenetwork) { - runTest(test_create_exenetwork, GetParam()); +TEST_P(UnitTestSuite, create_compiled_model) { + runTest(test_create_compiled_model, GetParam()); } TEST_P(UnitTestSuite, create_infer_request) { @@ -78,13 +78,16 @@ TEST_P(UnitTestSuite, infer_request_inference_full_pipeline) { // tests_pipelines/tests_pipelines_full_pipeline.cpp INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoModel, - ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices"})), - getTestCaseName); + ::testing::ValuesIn(generateTestsParams( + {"processes", "threads", "iterations", "devices", "api_versions"})), + getTestCaseName); INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuiteNoDevice, - ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "models"})), - getTestCaseName); + ::testing::ValuesIn(generateTestsParams( + {"processes", "threads", "iterations", "models", "api_versions"})), + getTestCaseName); INSTANTIATE_TEST_SUITE_P(StressUnitTests, UnitTestSuite, - ::testing::ValuesIn(generateTestsParams({"processes", "threads", "iterations", "devices", "models"})), - getTestCaseName); + ::testing::ValuesIn(generateTestsParams( + {"processes", "threads", "iterations", "devices", "models", "api_versions"})), + getTestCaseName); diff --git a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.cpp b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.cpp index 30eff12c962..4ca7fa7970b 100644 --- 
a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.cpp +++ b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.cpp @@ -11,75 +11,84 @@ using namespace InferenceEngine; -void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n) { +void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Load/unload plugin for device: " << target_device << " for " << n << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - load_unload_plugin(target_device)(); + load_unload_plugin(target_device, api_version)(); } } -void test_read_network(const std::string &model, const std::string &target_device, const int &n) { +void test_read_network(const std::string &model, const std::string &target_device, const int &n, const int &api_version) { log_info("Read network: \"" << model << "\" for " << n << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - read_cnnnetwork(model)(); + read_cnnnetwork(model, api_version)(); } } -void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n) { +void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - cnnnetwork_reshape_batch_x2(model)(); + cnnnetwork_reshape_batch_x2(model, i, api_version)(); } } -void test_set_input_params(const std::string &model, const std::string &target_device, const int &n) { +void test_set_input_params(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - set_input_params(model)(); + set_input_params(model, api_version)(); } } -void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n) { +void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Create ExecutableNetwork from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); + << "\" for device: \"" << target_device << "\" for " << n + << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - create_exenetwork(model, target_device)(); + create_compiled_model(model, target_device, api_version)(); } } -void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n) { +void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Create InferRequest from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); + << "\" for device: \"" << target_device << "\" for " << n + << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - create_infer_request(model, target_device)(); + create_infer_request(model, 
target_device, api_version)(); } } -void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n) { +void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Inference of InferRequest from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); + << "\" for device: \"" << target_device << "\" for " << n + << " times"); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - infer_request_inference(model, target_device)(); + infer_request_inference(model, target_device, api_version)(); } } diff --git a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.h b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.h index 57a68b9435b..74f7e1ff576 100644 --- a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.h +++ b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines.h @@ -10,21 +10,47 @@ #include // tests_pipelines/tests_pipelines.cpp -void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n); -void test_read_network(const std::string &model, const std::string &target_device, const int &n); -void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n); -void test_set_input_params(const std::string &model, const std::string &target_device, const int &n); -void test_create_exenetwork(const std::string &model, const std::string &target_device, const int &n); -void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n); -void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n); +void test_load_unload_plugin(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_read_network(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_cnnnetwork_reshape_batch_x2(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_set_input_params(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_create_compiled_model(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_create_infer_request(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); // tests_pipelines/tests_pipelines.cpp // tests_pipelines/tests_pipelines_full_pipeline.cpp -void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n); -void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n); -void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n); -void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n); -void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n); -void test_create_infer_request_full_pipeline(const std::string &model, const 
std::string &target_device, const int &n); -void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n); +void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, + const int &n, const int &api_version); + +void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version); + +void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, + const int &n, const int &api_version); // tests_pipelines/tests_pipelines_full_pipeline.cpp diff --git a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp index 1ec4bd72201..8d9d3c8c324 100644 --- a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp +++ b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp @@ -3,13 +3,10 @@ // #include "tests_pipelines.h" -#include "common_utils.h" #include +#include -#include - -using namespace InferenceEngine; #define batchIndex 0 @@ -34,247 +31,126 @@ using namespace InferenceEngine; else \ throw std::logic_error("Reshape wasn't applied for a model."); -void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_load_unload_plugin_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { log_info("Load/unload plugin for device: " << target_device << " for " << n << " times"); - Core ie; + auto ie_api_wrapper = create_infer_api_wrapper(api_version); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - // GetVersions silently register plugin in `plugins` through `GetCPPPluginByName` - ie.GetVersions(target_device); + // get_versions silently register plugin in `plugins` through `GetCPPPluginByName` + ie_api_wrapper->load_plugin(target_device); // Remove plugin for target_device from `plugins` - ie.UnregisterPlugin(target_device); + ie_api_wrapper->unload_plugin(target_device); } - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - setInputParameters(); - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? 
batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); } -void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_read_network_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Read network: \"" << model << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork; for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - cnnNetwork = ie.ReadNetwork(model); + ie_api_wrapper->read_network(model); } - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - setInputParameters(); - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); } -void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_set_input_params_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Apply preprocessing for CNNNetwork from network: \"" << model << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - for (auto &input : inputInfo) { - setInputParameters(); - } + ie_api_wrapper->set_input_params(model); } - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? 
batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); } -void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, const std::string &target_device, + const int &n, const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Reshape to batch*=2 of CNNNetwork created from network: \"" << model << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - for (auto &input : inputInfo) { - setInputParameters(); - } - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - int prev_batch = -1, new_batch; - for (auto &input : inputInfo) { - auto layout = input.second->getTensorDesc().getLayout(); - if ((layout == Layout::NCHW) || (layout == Layout::NC)) - prev_batch = shapes[input.first][batchIndex]; - } - if (prev_batch == -1) - throw std::logic_error("Reshape wasn't applied for a model."); - + ie_api_wrapper->read_network(model); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - - new_batch = ((i % 2) == 0) ? prev_batch * 2 : prev_batch; - for (auto &input : inputInfo) { - auto layout = input.second->getTensorDesc().getLayout(); - if ((layout == Layout::NCHW) || (layout == Layout::NC)) { - shapes[input.first][batchIndex] = new_batch; - doReshape = true; - } - } - reshapeCNNNetwork(); + ie_api_wrapper->change_batch_size(2, i); } - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? 
batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); } -void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_create_exenetwork_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Create ExecutableNetwork from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - setInputParameters(); - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork; + << "\" for device: \"" << target_device << "\" for " << n + << " times"); + ie_api_wrapper->read_network(model); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); + ie_api_wrapper->load_network(target_device); } - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); + ie_api_wrapper->infer(); } -void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { +void test_create_infer_request_full_pipeline(const std::string &model, const std::string &target_device, const int &n, + const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Create InferRequest from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - setInputParameters(); - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request; - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? 
batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); + << "\" for device: \"" << target_device << "\" for " << n + << " times"); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - infer_request = exeNetwork.CreateInferRequest(); - fillBlobs(infer_request, inputsInfo, batchSize); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); } - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->infer(); } -void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, const int &n) { + +void test_infer_request_inference_full_pipeline(const std::string &model, const std::string &target_device, + const int &n, const int &api_version) { + auto ie_api_wrapper = create_infer_api_wrapper(api_version); log_info("Inference of InferRequest from network: \"" << model - << "\" for device: \"" << target_device << "\" for " << n << " times"); - Core ie; - CNNNetwork cnnNetwork = ie.ReadNetwork(model); - InputsDataMap inputInfo(cnnNetwork.getInputsInfo()); - ICNNNetwork::InputShapes shapes = cnnNetwork.getInputShapes(); - bool doReshape = false; - for (auto &input : inputInfo) { - setInputParameters(); - computeShapesToReshape(); - } - reshapeCNNNetwork(); - ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device); - InferRequest infer_request = exeNetwork.CreateInferRequest(); - - auto batchSize = cnnNetwork.getBatchSize(); - batchSize = batchSize != 0 ? batchSize : 1; - const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo()); - fillBlobs(infer_request, inputsInfo, batchSize); - + << "\" for device: \"" << target_device << "\" for " << n + << " times"); + ie_api_wrapper->read_network(model); + ie_api_wrapper->load_network(target_device); + ie_api_wrapper->create_infer_request(); + ie_api_wrapper->prepare_input(); for (int i = 0; i < n; i++) { if (i == n / 2) { log_info("Half of the test have already passed"); } - infer_request.Infer(); - OutputsDataMap output_info(cnnNetwork.getOutputsInfo()); - for (auto &output : output_info) - Blob::Ptr outputBlob = infer_request.GetBlob(output.first); + ie_api_wrapper->infer(); } }
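
The refactored full-pipeline tests above all collapse to the same wrapper sequence, independent of the API version under test. A minimal sketch of that sequence follows, assuming the harness header ../common/infer_api/infer_api.h (included by the memleaks tests in this patch) declares create_infer_api_wrapper() and the wrapper methods used throughout the diff; the helper name run_single_inference is illustrative only and not part of the patch.

#include "../common/infer_api/infer_api.h"  // harness header, path as included by memleaks_tests/tests.cpp in this patch
#include <string>

// Sketch of the common load->infer cycle exercised by the refactored pipelines.
// api_version selects the OV API implementation inside the wrapper.
static void run_single_inference(const std::string &model, const std::string &device, const int &api_version) {
    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    ie_api_wrapper->read_network(model);      // was: ie.ReadNetwork(model)
    ie_api_wrapper->load_network(device);     // was: ie.LoadNetwork(cnnNetwork, target_device)
    ie_api_wrapper->create_infer_request();   // was: exeNetwork.CreateInferRequest()
    ie_api_wrapper->prepare_input();          // was: fillBlobs(infer_request, inputsInfo, batchSize)
    ie_api_wrapper->infer();                  // was: infer_request.Infer() plus output blob readback
}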
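
The inference_with_streams pipeline follows the same sequence but sets THROUGHPUT_STREAMS first and sizes the request pool from OPTIMAL_NUMBER_OF_INFER_REQUESTS, falling back to the stream count when the query throws. A sketch under the same assumptions as the previous block (METRIC_KEY and log_err come from the existing test utilities; run_inference_with_streams is an illustrative name):

// Sketch of the streams variant, mirroring the refactored inference_with_streams pipeline above.
static void run_inference_with_streams(const std::string &model, const std::string &device,
                                       const int &nstreams, const int &api_version) {
    auto ie_api_wrapper = create_infer_api_wrapper(api_version);
    ie_api_wrapper->load_plugin(device);
    ie_api_wrapper->set_config(device, "THROUGHPUT_STREAMS", nstreams);
    ie_api_wrapper->read_network(model);
    ie_api_wrapper->load_network(device);

    unsigned int nireq = nstreams;  // fallback when the metric is unavailable
    try {
        nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
    } catch (const std::exception &ex) {
        log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
    }

    // One request per optimal slot: create, fill inputs, run inference.
    for (unsigned int counter = 0; counter < nireq; counter++) {
        ie_api_wrapper->create_infer_request();
        ie_api_wrapper->prepare_input();
        ie_api_wrapper->infer();
    }
}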