TESTS: Added test for parallel LoadNetwork with accuracy check (#910)

Ilya Lavrenov 2020-06-15 12:22:59 +03:00 committed by GitHub
parent b058948763
commit 7049142080

@@ -6,10 +6,14 @@
#include <details/ie_exception.hpp>
#include <ie_plugin_config.hpp>
#include <ie_extension.h>
#include <cpp/ie_cnn_network.h>
#include <cpp/ie_executable_network.hpp>
#include <cpp/ie_infer_request.hpp>
#include <multi-device/multi_device_config.hpp>
#include <file_utils.h>
#include <ngraph_functions/subgraph_builders.hpp>
#include <functional_test_utils/blob_utils.hpp>
#include <functional_test_utils/test_model/test_model.hpp>
#include <common_test_utils/file_utils.hpp>
#include <common_test_utils/test_assertions.hpp>
@@ -193,7 +197,68 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork) {
    ie.SetConfig(config, deviceName);
    runParallel([&] () {
        auto value = counter++;
-       (void)ie.LoadNetwork(networks[(counter++) % networks.size()], deviceName);
+       (void)ie.LoadNetwork(networks[value % networks.size()], deviceName);
    }, numIterations, numThreads);
}
// tested function: LoadNetwork accuracy
TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetworkAccuracy) {
    InferenceEngine::Core ie;
    std::atomic<unsigned int> counter{0u};

    const FuncTestUtils::TestModel::TestModel models[] = {
        FuncTestUtils::TestModel::convReluNormPoolFcModelFP32,
        FuncTestUtils::TestModel::convReluNormPoolFcModelFP16
    };

    std::vector<InferenceEngine::CNNNetwork> networks;
    for (auto & model : models) {
        networks.emplace_back(ie.ReadNetwork(model.model_xml_str, model.weights_blob));
    }

    // TODO: uncomment after fixing *-31414
    // networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
    // networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
    // networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
    // networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
    // networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));

    ie.SetConfig(config, deviceName);
    runParallel([&] () {
        auto value = counter++;
        auto network = networks[value % networks.size()];

        InferenceEngine::BlobMap blobs;
        for (const auto & info : network.getInputsInfo()) {
            auto input = FuncTestUtils::createAndFillBlobFloatNormalDistribution(
                info.second->getTensorDesc(), 0.0f, 0.2f, 7235346);
            blobs[info.first] = input;
        }

        auto getOutputBlob = [&](InferenceEngine::Core & core) {
            auto exec = core.LoadNetwork(network, deviceName);
            auto req = exec.CreateInferRequest();
            req.SetInput(blobs);

            auto info = network.getOutputsInfo();
            auto outputInfo = info.begin();
            auto blob = make_blob_with_precision(outputInfo->second->getTensorDesc());
            blob->allocate();
            req.SetBlob(outputInfo->first, blob);

            req.Infer();
            return blob;
        };

        auto outputActual = getOutputBlob(ie);

        // compare actual value using the second Core
        {
            InferenceEngine::Core ie2;
            ie2.SetConfig(config, deviceName);
            auto outputRef = getOutputBlob(ie2);

            FuncTestUtils::compareBlobs(outputActual, outputRef);
        }
    }, numIterations, numThreads);
}
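
Note: the accuracy check above depends on two things visible in the diff: the input blobs are filled from a normal distribution with a fixed seed (7235346), so every LoadNetwork call infers on identical data, and the reference output comes from a second, independently created and configured Core (ie2). A minimal standalone sketch of that comparison shape follows; the helper name inferOnce, the model path "model.xml", and the "CPU" device are placeholders for illustration, not part of the test.

#include <ie_core.hpp>
#include <string>

// Loads the network on the given Core, runs one inference, and
// returns the first output blob.
static InferenceEngine::Blob::Ptr inferOnce(InferenceEngine::Core & core,
                                            const InferenceEngine::CNNNetwork & network,
                                            const std::string & device) {
    auto exec = core.LoadNetwork(network, device);
    auto req = exec.CreateInferRequest();
    // In the real test, inputs are filled via
    // createAndFillBlobFloatNormalDistribution with a fixed seed,
    // so both Core instances see identical input data.
    req.Infer();
    return req.GetBlob(network.getOutputsInfo().begin()->first);
}

int main() {
    InferenceEngine::Core ie, ie2;               // two independent Cores
    auto network = ie.ReadNetwork("model.xml");  // placeholder model path
    auto actual    = inferOnce(ie,  network, "CPU");
    auto reference = inferOnce(ie2, network, "CPU");
    // The test then checks the two blobs with FuncTestUtils::compareBlobs.
    return 0;
}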
@@ -211,7 +276,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_MultipleIECores) {
        auto value = counter++;
        InferenceEngine::Core ie;
        ie.SetConfig(config, deviceName);
-       auto model = models[(counter++) % models.size()];
+       auto model = models[value % models.size()];
        auto network = ie.ReadNetwork(model.model_xml_str, model.weights_blob);
        (void)ie.LoadNetwork(network, deviceName);
    }, numIterations, numThreads);
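
For context, the two one-line fixes in this commit address the same bug: the lambda fetched a ticket with auto value = counter++; but then indexed with (counter++) % size, advancing the shared atomic counter twice per iteration, so the index no longer matched the ticket and the models were exercised unevenly. Reusing the single fetched value keeps exactly one increment per iteration. A minimal self-contained sketch of the corrected round-robin pattern; std::thread stands in for the test's runParallel helper, which is not shown in this diff.

#include <atomic>
#include <thread>
#include <vector>

int main() {
    std::atomic<unsigned int> counter{0u};
    const std::vector<int> items{10, 20, 30};

    auto worker = [&] () {
        for (int i = 0; i < 100; ++i) {
            // Fetch the ticket exactly once per iteration; a second
            // counter++ here would skew the index against the ticket
            // and skip elements, as in the code before this commit.
            auto value = counter++;
            (void)items[value % items.size()];
        }
    };

    std::thread t1(worker), t2(worker);
    t1.join();
    t2.join();
    return 0;
}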