Apa/multi no legacy (#3331)

* Removed legacy network representation usage

* Updated core threading tests not to use legacy networks

* Removed QueryNetworkMULTIwithHETERONoThrowv7 test

* Removed QueryNetworkHETEROwithMULTINoThrow_v7 test

* Added QueryNetwork test and skipped it for GNA

Co-authored-by: apankratovantonp <anton.pankratov@intel.com>
This commit is contained in:
Ilya Lavrenov 2020-11-26 19:18:13 +03:00 committed by GitHub
parent 3126fbd102
commit 7f01b5c28b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 32 additions and 131 deletions

View File

@ -12,7 +12,7 @@ ie_add_plugin(NAME ${TARGET_NAME}
SOURCES ${SOURCES} ${HEADERS}
VERSION_DEFINES_FOR multi_device_plugin.cpp)
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_legacy inference_engine)
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine)
set_ie_threading_interface_for(${TARGET_NAME})

View File

@ -15,7 +15,6 @@
#include "ie_metric_helpers.hpp"
#include <legacy/ie_util_internal.hpp>
#include <cpp_interfaces/base/ie_infer_async_request_base.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ie_plugin_config.hpp>

View File

@ -8,9 +8,10 @@
#include <memory>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <ie_metric_helpers.hpp>
#include <legacy/ie_util_internal.hpp>
#include <multi-device/multi_device_config.hpp>
#include "multi_device_plugin.hpp"
@ -152,6 +153,10 @@ ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(co
THROW_IE_EXCEPTION << "Please, work with MULTI device via InferencEngine::Core object";
}
if (network.getFunction() == nullptr) {
THROW_IE_EXCEPTION << "MULTI device supports just ngraph network representation";
}
auto fullConfig = mergeConfigs(_config, config);
auto priorities = fullConfig.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES);
if (priorities == fullConfig.end()) {
@ -168,8 +173,9 @@ ExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadExeNetworkImpl(co
for (auto& p : metaDevices) {
auto & deviceName = p.deviceName;
auto & deviceConfig = p.config;
auto clonedNetwork = cloneNetwork(network);
executableNetworkPerDevice.insert({ deviceName, GetCore()->LoadNetwork(CNNNetwork{clonedNetwork}, deviceName, deviceConfig) });
executableNetworkPerDevice.insert({ deviceName, GetCore()->LoadNetwork(
CNNNetwork{ICNNNetwork::Ptr{const_cast<ICNNNetwork*>(&network),
[](ICNNNetwork*){}}}, deviceName, deviceConfig) });
multiNetworkConfig.insert(deviceConfig.begin(), deviceConfig.end());
}
if (executableNetworkPerDevice.empty())
@ -193,6 +199,10 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork&
THROW_IE_EXCEPTION << "Please, work with MULTI device via InferencEngine::Core object";
}
if (network.getFunction() == nullptr) {
THROW_IE_EXCEPTION << "MULTI device supports just ngraph network representation";
}
queryResult.rc = StatusCode::OK;
queryResult.supportedLayersMap.clear();
@ -201,57 +211,22 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork&
if (priorities == fullConfig.end()) {
THROW_IE_EXCEPTION << "KEY_MULTI_DEVICE_PRIORITIES key is not set for MULTI device";
}
auto metaDevices = ParseMetaDevices(priorities->second, fullConfig);
std::unordered_set<std::string> supportedLayers;
auto allSupportsNgraph =
std::all_of(std::begin(metaDevices), std::end(metaDevices),
[&] (const DeviceInformation& value) -> bool {
auto clonedNetwork = cloneNetwork(network);
try { GetCore()->QueryNetwork(*clonedNetwork, value.deviceName, value.config); }
catch (const InferenceEngine::details::InferenceEngineException & ex) {
std::string message = ex.what();
return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
}
return true;
});
for (auto&& value : metaDevices) {
auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) {
auto clonedNetwork = cloneNetwork(networkObject);
auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, value.deviceName, value.config);
std::unordered_set<std::string> deviceSupportedLayers;
for (auto&& layerQr : deviceQr.supportedLayersMap) {
deviceSupportedLayers.emplace(layerQr.first);
}
supportedLayers = supportedLayers.empty()
? deviceSupportedLayers : (deviceSupportedLayers.empty()
? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
};
if (network.getFunction()) {
if (!allSupportsNgraph) {
if (contains(fullConfig, CONFIG_KEY_INTERNAL(AGGREGATED_PLUGIN))) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
} else {
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
if (cnnNetworkImpl == nullptr)
THROW_IE_EXCEPTION << "Cannot create CNNNetworkImpl shared_ptr";
queryNetwork(*cnnNetworkImpl);
}
} else {
queryNetwork(network);
}
} else {
queryNetwork(network);
auto deviceQr = GetCore()->QueryNetwork(CNNNetwork{ICNNNetwork::Ptr{const_cast<ICNNNetwork*>(&network),
[](ICNNNetwork*){}}}, value.deviceName, value.config);
std::unordered_set<std::string> deviceSupportedLayers;
for (auto&& layerQr : deviceQr.supportedLayersMap) {
deviceSupportedLayers.emplace(layerQr.first);
}
supportedLayers = supportedLayers.empty()
? deviceSupportedLayers : (deviceSupportedLayers.empty()
? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
}
for (auto&& supportedLayer : supportedLayers) {
queryResult.supportedLayersMap[supportedLayer] = GetName();
}
return queryResult;
}

View File

@ -45,5 +45,7 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ActivationConcatsEltwise.*CS=35.*)",
// TODO: Issue: 40960
R"(.*(ConstantResultSubgraphTest).*)",
// TODO: Issue: 29577
R"(.*CoreThreadingTests.smoke_QueryNetwork.*)"
};
}

View File

@ -1350,63 +1350,6 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) {
// QueryNetwork with HETERO on MULTI combinations particular device
//
// Removed by this commit: legacy (v7/CNNNetworkImpl) QueryNetwork test.
// Queried a network on HETERO with MULTI listed in TARGET_FALLBACK, after
// converting the ngraph network to the legacy CNNNetworkImpl representation.
TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROwithMULTINoThrow_v7) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
// Only meaningful when the device supports device IDs and reports AVAILABLE_DEVICES.
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
// Build a comma-separated priority list of "<deviceName>.<id>" entries for MULTI.
for (auto &&device : availableDevices) {
devices += deviceName + '.' + device;
if (&device != &(availableDevices.back())) {
devices += ',';
}
}
// Legacy path: wrap the ngraph network into the v7 CNNNetworkImpl representation.
auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
QueryNetworkResult result;
// HETERO falls back through MULTI first, then CPU.
std::string targetFallback(std::string(CommonTestUtils::DEVICE_MULTI) + "," + CommonTestUtils::DEVICE_CPU);
ASSERT_NO_THROW(result = ie.QueryNetwork(InferenceEngine::CNNNetwork{convertedActualNetwork}, CommonTestUtils::DEVICE_HETERO, {
{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
{"TARGET_FALLBACK", targetFallback}}));
// Every reported supported layer must exist in the converted network.
for (auto &&layer : result.supportedLayersMap) {
EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
}
} else {
GTEST_SKIP();
}
}
// Removed by this commit: legacy (v7/CNNNetworkImpl) QueryNetwork test.
// Mirror of the test above: queries on MULTI whose device priorities are
// HETERO sub-devices, using the legacy network representation.
TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIwithHETERONoThrowv7) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;
// Only meaningful when the device supports device IDs and reports AVAILABLE_DEVICES.
if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) {
std::string devices;
auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
// Build a comma-separated priority list of "HETERO.<id>" entries for MULTI.
for (auto &&device : availableDevices) {
devices += CommonTestUtils::DEVICE_HETERO + std::string(".") + device;
if (&device != &(availableDevices.back())) {
devices += ',';
}
}
QueryNetworkResult result;
// Legacy path: wrap the ngraph network into the v7 CNNNetworkImpl representation.
auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
ASSERT_NO_THROW(result = ie.QueryNetwork(InferenceEngine::CNNNetwork{convertedActualNetwork}, CommonTestUtils::DEVICE_MULTI, {
{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
{"TARGET_FALLBACK", deviceName + "," + CommonTestUtils::DEVICE_CPU}}));
// Every reported supported layer must exist in the converted network.
for (auto &&layer : result.supportedLayersMap) {
EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
}
} else {
GTEST_SKIP();
}
}
TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
Core ie;

View File

@ -157,8 +157,7 @@ TEST_P(CoreThreadingTests, smoke_QueryNetwork) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
InferenceEngine::Core ie;
auto model = FuncTestUtils::TestModel::convReluNormPoolFcModelFP32;
auto network = ie.ReadNetwork(model.model_xml_str, model.weights_blob);
InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::make2InputSubtract());
ie.SetConfig(config, deviceName);
InferenceEngine::QueryNetworkResult refResult = ie.QueryNetwork(network, deviceName);
@ -224,15 +223,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork) {
InferenceEngine::Core ie;
std::atomic<unsigned int> counter{0u};
const FuncTestUtils::TestModel::TestModel models[] = {
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32,
FuncTestUtils::TestModel::convReluNormPoolFcModelFP16
};
std::vector<InferenceEngine::CNNNetwork> networks;
for (auto & model : models) {
networks.emplace_back(ie.ReadNetwork(model.model_xml_str, model.weights_blob));
}
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
@ -253,15 +244,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetworkAccuracy) {
InferenceEngine::Core ie;
std::atomic<unsigned int> counter{0u};
const FuncTestUtils::TestModel::TestModel models[] = {
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32,
FuncTestUtils::TestModel::convReluNormPoolFcModelFP16
};
std::vector<InferenceEngine::CNNNetwork> networks;
for (auto & model : models) {
networks.emplace_back(ie.ReadNetwork(model.model_xml_str, model.weights_blob));
}
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
@ -314,18 +297,17 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_MultipleIECores) {
std::atomic<unsigned int> counter{0u};
// TODO: replace with subgraph builders after fixing *-31414
const std::vector<FuncTestUtils::TestModel::TestModel> models = {
FuncTestUtils::TestModel::convReluNormPoolFcModelFP32,
FuncTestUtils::TestModel::convReluNormPoolFcModelFP16
};
std::vector<InferenceEngine::CNNNetwork> networks;
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
runParallel([&] () {
auto value = counter++;
InferenceEngine::Core ie;
ie.SetConfig(config, deviceName);
auto model = models[value % models.size()];
auto network = ie.ReadNetwork(model.model_xml_str, model.weights_blob);
(void)ie.LoadNetwork(network, deviceName);
(void)ie.LoadNetwork(networks[value % networks.size()], deviceName);
}, numIterations, numThreads);
}