Fix the logic issue of handling the ov::device::properties in the ie core. (#13482)

* Update the logic to handle the ov::device::properties in the ie core.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Update test cases.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* 1. Remove the config filter within AUTO plugin when passing the configs to the target hardware device through AUTO plugin.
2. Add test cases to check if the secondary properties have been passed into the target device through the virtual device.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Add mock tests to check if device properties work through the AUTO or MULTI plugin.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Update.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Add mock header file for loadnetwork with device properties testing.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Update.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

* Update gtest commit to fix the warning issue appeared in the macro MATCHER_P.

Signed-off-by: Wang, Yang <yang4.wang@intel.com>

Signed-off-by: Wang, Yang <yang4.wang@intel.com>
This commit is contained in:
Wang, Yang 2022-11-07 10:36:25 +08:00 committed by GitHub
parent 8f9fc34ed3
commit 560dba22cb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 360 additions and 40 deletions

View File

@ -182,13 +182,15 @@ void allowNotImplemented(F&& f) {
ov::AnyMap flatten_sub_properties(const std::string& device, const ov::AnyMap& properties) {
ov::AnyMap result = properties;
bool isVirtualDev = device.find("AUTO") != std::string::npos || device.find("MULTI") != std::string::npos ||
device.find("HETERO") != std::string::npos;
for (auto item = result.begin(); item != result.end();) {
auto parsed = parseDeviceNameIntoConfig(item->first);
if (!item->second.is<ov::AnyMap>()) {
item++;
continue;
}
if (device.find(parsed._deviceName) != std::string::npos) {
if (device == parsed._deviceName) {
// 1. flatten the scondary property for target device
for (auto&& sub_property : item->second.as<ov::AnyMap>()) {
// 1.1 1st level property overides 2nd level property
@ -197,12 +199,12 @@ ov::AnyMap flatten_sub_properties(const std::string& device, const ov::AnyMap& p
result[sub_property.first] = sub_property.second;
}
item = result.erase(item);
} else if (device != "AUTO" && device != "MULTI" && device != "HETERO") {
// 2. remove the secondary property setting for other hard ware device
item = result.erase(item);
} else {
// 3. keep the secondary property for the other virtual devices
} else if (isVirtualDev) {
// 2. keep the secondary property for the other virtual devices
item++;
} else {
// 3. remove the secondary property setting for other hardware device
item = result.erase(item);
}
}
return result;

View File

@ -443,29 +443,22 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
LOG_INFO_TAG("load with model path");
}
}
// replace the configure with configure that auto want to pass to device
// and reset the strDevices to support devices
auto validConfigKey = PerfHintsConfig::SupportedKeys();
validConfigKey.push_back(PluginConfigParams::KEY_PERF_COUNT);
validConfigKey.push_back(PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS);
// reset the strDevices to support devices
strDevices = "";
for (auto iter = supportDevices.begin(); iter != supportDevices.end(); iter++) {
std::map<std::string, std::string> deviceConfig;
auto& configs = iter->config;
for (auto& config : configs) {
if (std::find(validConfigKey.begin(), validConfigKey.end(), config.first) != validConfigKey.end()) {
deviceConfig.insert({config.first, config.second});
LOG_INFO_TAG("device:%s, config:%s=%s", iter->deviceName.c_str(),
config.first.c_str(), config.second.c_str());
}
}
insertPropToConfig(CONFIG_KEY(ALLOW_AUTO_BATCHING), iter->deviceName, deviceConfig);
insertPropToConfig(CONFIG_KEY(AUTO_BATCH_TIMEOUT), iter->deviceName, deviceConfig);
insertPropToConfig(CONFIG_KEY(CACHE_DIR), iter->deviceName, deviceConfig);
iter->config = deviceConfig;
strDevices += iter->deviceName;
strDevices += ((iter + 1) == supportDevices.end()) ? "" : ",";
LOG_INFO_TAG("device:%s, priority:%ld", iter->deviceName.c_str(), iter->devicePriority);
auto& configs = iter->config;
for (auto& config : configs) {
LOG_INFO_TAG("device:%s, config:%s=%s",
iter->deviceName.c_str(),
config.first.c_str(),
config.second.c_str());
}
insertPropToConfig(CONFIG_KEY(ALLOW_AUTO_BATCHING), iter->deviceName, configs);
insertPropToConfig(CONFIG_KEY(AUTO_BATCH_TIMEOUT), iter->deviceName, configs);
insertPropToConfig(CONFIG_KEY(CACHE_DIR), iter->deviceName, configs);
strDevices += iter->deviceName;
strDevices += ((iter + 1) == supportDevices.end()) ? "" : ",";
LOG_INFO_TAG("device:%s, priority:%ld", iter->deviceName.c_str(), iter->devicePriority);
}
autoSContext->_modelPath = clonedModelPath;
// clone the network, in case of reshape conflict

View File

@ -48,7 +48,7 @@ public:
MOCKTESTMACRO std::string GetDeviceList(const std::map<std::string, std::string>& config) const;
std::list<DeviceInformation> GetValidDevice(const std::vector<DeviceInformation>& metaDevices,
MOCKTESTMACRO std::list<DeviceInformation> GetValidDevice(const std::vector<DeviceInformation>& metaDevices,
const std::string& networkPrecision = METRIC_VALUE(FP32));
MOCKTESTMACRO DeviceInformation SelectDevice(const std::vector<DeviceInformation>& metaDevices,

View File

@ -87,39 +87,40 @@ INSTANTIATE_TEST_SUITE_P(
const std::vector<ov::AnyMap> multiConfigs = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU)}
};
const std::vector<ov::AnyMap> configsDeviceProperties = {{ov::device::properties("CPU", ov::num_streams(3))}};
const std::vector<ov::AnyMap> configsWithSecondaryProperties = {
{ov::device::properties("CPU", ov::num_streams(4))},
{ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
const std::vector<ov::AnyMap> multiConfigsWithSecondaryProperties = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
const std::vector<ov::AnyMap> autoConfigsWithSecondaryProperties = {
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("AUTO",
ov::enable_profiling(true),
ov::enable_profiling(false),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
@ -127,7 +128,7 @@ const std::vector<ov::AnyMap> autoConfigsWithSecondaryProperties = {
ov::enable_profiling(false),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))},
{ov::device::priorities(CommonTestUtils::DEVICE_CPU),
ov::device::properties("AUTO",
@ -135,7 +136,7 @@ const std::vector<ov::AnyMap> autoConfigsWithSecondaryProperties = {
ov::device::priorities(CommonTestUtils::DEVICE_GPU),
ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)),
ov::device::properties("CPU",
ov::enable_profiling(true),
ov::num_streams(4),
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)),
ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}};
@ -270,6 +271,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassLoadNetworkWithSecondaryPropertiesTes
::testing::Combine(::testing::Values("AUTO"),
::testing::ValuesIn(autoConfigsWithSecondaryProperties)));
// IE Class load and check network with ov::device::properties
INSTANTIATE_TEST_SUITE_P(smoke_CPU_OVClassLoadNetworkAndCheckWithSecondaryPropertiesTest,
OVClassLoadNetworkAndCheckSecondaryPropertiesTest,
::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU"),
::testing::ValuesIn(configsDeviceProperties)));
INSTANTIATE_TEST_SUITE_P(
smoke_OVClassLoadNetworkTest, OVClassLoadNetworkTest,
::testing::Values("CPU"));

View File

@ -114,6 +114,7 @@ using OVClassSpecificDeviceTestGetConfig = OVClassBaseTestP;
using OVClassLoadNetworkWithCorrectPropertiesTest = OVClassSetDevicePriorityConfigTest;
using OVClassLoadNetworkWithDefaultPropertiesTest = OVClassSetDevicePriorityConfigTest;
using OVClassLoadNetworkWithDefaultIncorrectPropertiesTest = OVClassSetDevicePriorityConfigTest;
using OVClassLoadNetworkAndCheckSecondaryPropertiesTest = OVClassSetDevicePriorityConfigTest;
class OVClassSeveralDevicesTest : public OVPluginTestBase,
public OVClassNetworkTest,
@ -1096,6 +1097,18 @@ TEST_P(OVClassLoadNetworkWithCorrectPropertiesTest, LoadNetworkWithCorrectProper
OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, target_device, configuration));
}
TEST_P(OVClassLoadNetworkAndCheckSecondaryPropertiesTest, LoadNetworkAndCheckSecondaryPropertiesTest) {
ov::Core ie = createCoreWithTemplate();
ov::CompiledModel model;
OV_ASSERT_NO_THROW(model = ie.compile_model(actualNetwork, target_device, configuration));
auto property = configuration.begin()->second.as<ov::AnyMap>();
auto actual = property.begin()->second.as<int32_t>();
ov::Any value;
OV_ASSERT_NO_THROW(value = model.get_property(ov::num_streams.name()));
int32_t expect = value.as<int32_t>();
ASSERT_EQ(actual, expect);
}
TEST_P(OVClassLoadNetworkWithDefaultPropertiesTest, LoadNetworkWithDefaultPropertiesTest) {
ov::Core ie = createCoreWithTemplate();
ov::CompiledModel model;

View File

@ -0,0 +1,273 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <common_test_utils/test_constants.hpp>
#include <ie_core.hpp>
#include <ie_metric_helpers.hpp>
#include <multi-device/multi_device_config.hpp>
#include <ngraph_functions/subgraph_builders.hpp>
#include <openvino/runtime/core.hpp>
#include "cpp/ie_plugin.hpp"
#include "mock_common.hpp"
#include "plugin/mock_load_network_properties.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/impl/mock_inference_plugin_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iexecutable_network_internal.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
#include "unit_test_utils/mocks/cpp_interfaces/interface/mock_ivariable_state_internal.hpp"
#include "unit_test_utils/mocks/mock_iinfer_request.hpp"
using ::testing::_;
using ::testing::MatcherCast;
using ::testing::Matches;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrEq;
using ::testing::Throw;
using Config = std::map<std::string, std::string>;
// define a matcher if all the elements of subMap are contained in the map.
// Succeeds when every (key, value) pair of subMap appears in the matched map
// (arg) with an equal value; an empty subMap trivially matches any map.
MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are contained in the map.") {
if (subMap.empty())
return true;
for (auto& item : subMap) {
auto key = item.first;
auto value = item.second;
// Look up the expected key in the actual map under test.
auto dest = arg.find(key);
if (dest == arg.end()) {
// Key missing entirely -> not contained.
return false;
} else if (dest->second != value) {
// Key present but mapped to a different value -> not contained.
return false;
}
}
return true;
}
using namespace MockMultiDevice;
using ConfigParams = std::tuple<std::string, // virtual device name to load network
std::vector<std::string>, // hardware device name to expect loading network on
Config>; // secondary property setting to device
// Shared parameter storage; (re)populated by CreateConfigs() before each suite instantiation.
static std::vector<ConfigParams> testConfigs;
// Fixture that mocks the IE core plus the AUTO/MULTI plugin so tests can verify
// that secondary (per-device) properties are forwarded to the expected hardware
// devices when a network is loaded through a virtual device.
class LoadNetworkWithSecondaryConfigsMockTest : public ::testing::TestWithParam<ConfigParams> {
public:
std::shared_ptr<NiceMock<MockICore>> core;
std::shared_ptr<NiceMock<MockMultiPluginForLoadNetworkWithPropertiesTest>> plugin;
InferenceEngine::CNNNetwork simpleCnnNetwork;
// mock cpu exeNetwork
std::shared_ptr<NiceMock<MockIExecutableNetworkInternal>> cpuMockIExeNet;
ov::SoPtr<IExecutableNetworkInternal> cpuMockExeNetwork;
NiceMock<MockIInferencePlugin>* cpuMockIPlugin;
InferenceEngine::InferencePlugin cpuMockPlugin;
// mock gpu exeNetwork
std::shared_ptr<NiceMock<MockIExecutableNetworkInternal>> gpuMockIExeNet;
ov::SoPtr<IExecutableNetworkInternal> gpuMockExeNetwork;
NiceMock<MockIInferencePlugin>* gpuMockIPlugin;
InferenceEngine::InferencePlugin gpuMockPlugin;
std::shared_ptr<NiceMock<MockIInferRequestInternal>> inferReqInternal;

public:
// Builds a human-readable test name from the virtual device, the expected
// target devices, and the CPU/GPU secondary-property strings (if present).
static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
std::string deviceName;
std::vector<std::string> targetDevices;
Config deviceConfigs;
std::tie(deviceName, targetDevices, deviceConfigs) = obj.param;
std::ostringstream result;
result << "_virtual_device_" << deviceName;
result << "_loadnetwork_to_device_";
for (auto& device : targetDevices) {
result << device << "_";
}
auto cpuConfig = deviceConfigs.find("CPU");
auto gpuConfig = deviceConfigs.find("GPU");
result << "device_properties_";
if (cpuConfig != deviceConfigs.end())
result << "CPU_" << cpuConfig->second << "_";
if (gpuConfig != deviceConfigs.end())
result << "GPU_" << gpuConfig->second;
return result.str();
}

// Enumerates {virtual device, expected target devices, config-with-secondary-properties}
// combinations for AUTO and MULTI, with and without explicit device lists.
// Each per-device property is encoded as a space-separated "KEY VALUE" string.
static std::vector<ConfigParams> CreateConfigs() {
testConfigs.clear();
testConfigs.push_back(
ConfigParams{"AUTO", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO", {"CPU", "GPU"}, {{"GPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:CPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"AUTO:GPU", {"GPU"}, {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(ConfigParams{"AUTO:GPU,CPU",
{"CPU", "GPU"},
{{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:CPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}});
testConfigs.push_back(ConfigParams{"MULTI:CPU,GPU",
{"CPU", "GPU"},
{{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}});
testConfigs.push_back(
ConfigParams{"MULTI:GPU", {"GPU"}, {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}});
testConfigs.push_back(ConfigParams{"MULTI:GPU,CPU",
{"CPU", "GPU"},
{{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}});
return testConfigs;
}

// Releases the mock core and plugin so gmock can verify expectations per test.
void TearDown() override {
core.reset();
plugin.reset();
}

// Wires up all mocks: fake CPU/GPU executable networks, a mock ICore whose
// LoadNetwork returns them per device name, and a mock plugin that delegates
// device parsing/selection to the real MultiDeviceInferencePlugin logic.
void SetUp() override {
// prepare cpuMockExeNetwork
cpuMockIExeNet = std::make_shared<NiceMock<MockIExecutableNetworkInternal>>();
auto cpuMockIPluginPtr = std::make_shared<NiceMock<MockIInferencePlugin>>();
ON_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _))
.WillByDefault(Return(cpuMockIExeNet));
cpuMockPlugin = InferenceEngine::InferencePlugin{cpuMockIPluginPtr, {}};
// remove annoying ON CALL message
EXPECT_CALL(*cpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
cpuMockExeNetwork = cpuMockPlugin.LoadNetwork(CNNNetwork{}, {});
// prepare gpuMockExeNetwork
gpuMockIExeNet = std::make_shared<NiceMock<MockIExecutableNetworkInternal>>();
auto gpuMockIPluginPtr = std::make_shared<NiceMock<MockIInferencePlugin>>();
ON_CALL(*gpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _))
.WillByDefault(Return(gpuMockIExeNet));
gpuMockPlugin = InferenceEngine::InferencePlugin{gpuMockIPluginPtr, {}};
// remove annoying ON CALL message
EXPECT_CALL(*gpuMockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).Times(1);
gpuMockExeNetwork = gpuMockPlugin.LoadNetwork(CNNNetwork{}, {});
// prepare mockicore and cnnNetwork for loading
core = std::shared_ptr<NiceMock<MockICore>>(new NiceMock<MockICore>());
auto* origin_plugin = new NiceMock<MockMultiPluginForLoadNetworkWithPropertiesTest>();
plugin = std::shared_ptr<NiceMock<MockMultiPluginForLoadNetworkWithPropertiesTest>>(origin_plugin);
// replace core with mock Icore
plugin->SetCore(core);
inferReqInternal = std::make_shared<NiceMock<MockIInferRequestInternal>>();
ON_CALL(*cpuMockIExeNet.get(), CreateInferRequest()).WillByDefault(Return(inferReqInternal));
ON_CALL(*gpuMockIExeNet.get(), CreateInferRequest()).WillByDefault(Return(inferReqInternal));
ON_CALL(*cpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
.WillByDefault(Return("0"));
ON_CALL(*gpuMockIExeNet.get(), GetMetric(StrEq(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))))
.WillByDefault(Return("0"));
std::vector<std::string> availableDevs = {"CPU", "GPU"};
ON_CALL(*core, GetAvailableDevices()).WillByDefault(Return(availableDevs));
std::vector<std::string> metrics = {METRIC_KEY(SUPPORTED_CONFIG_KEYS)};
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_METRICS)), _)).WillByDefault(Return(metrics));
std::vector<std::string> configKeys = {"SUPPORTED_CONFIG_KEYS", "NUM_STREAMS"};
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _)).WillByDefault(Return(configKeys));
// Delegate parsing/selection to the real plugin implementation so the test
// exercises genuine AUTO/MULTI device-resolution logic.
ON_CALL(*plugin, ParseMetaDevices)
.WillByDefault(
[this](const std::string& priorityDevices, const std::map<std::string, std::string>& config) {
return plugin->MultiDeviceInferencePlugin::ParseMetaDevices(priorityDevices, config);
});
// NOTE(review): SelectDevice's default action is set again further below with an
// identical delegate; the duplication is harmless but redundant.
ON_CALL(*plugin, SelectDevice)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices,
const std::string& netPrecision,
unsigned int priority) {
return plugin->MultiDeviceInferencePlugin::SelectDevice(metaDevices, netPrecision, priority);
});
ON_CALL(*plugin, GetValidDevice)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices, const std::string& netPrecision) {
std::list<DeviceInformation> devices(metaDevices.begin(), metaDevices.end());
return devices;
});
// Emulate ICore::GetSupportedConfig: parse the "KEY VALUE" string stored under
// the device's name in the full config into a per-device Config map.
ON_CALL(*core, GetSupportedConfig)
.WillByDefault([](const std::string& device, const std::map<std::string, std::string>& fullConfigs) {
auto item = fullConfigs.find(device);
Config deviceConfigs;
if (item != fullConfigs.end()) {
std::stringstream strConfigs(item->second);
ov::util::Read<Config>{}(strConfigs, deviceConfigs);
}
return deviceConfigs;
});
ON_CALL(*plugin, GetDeviceList).WillByDefault([this](const std::map<std::string, std::string>& config) {
return plugin->MultiDeviceInferencePlugin::GetDeviceList(config);
});
ON_CALL(*plugin, SelectDevice)
.WillByDefault([this](const std::vector<DeviceInformation>& metaDevices,
const std::string& netPrecision,
unsigned int Priority) {
return plugin->MultiDeviceInferencePlugin::SelectDevice(metaDevices, netPrecision, Priority);
});
std::vector<std::string> cpuCability{"FP32", "FP16", "INT8", "BIN"};
std::vector<std::string> gpuCability{"FP32", "FP16", "BATCHED_BLOB", "BIN", "INT8"};
ON_CALL(*core, GetMetric(StrEq(CommonTestUtils::DEVICE_CPU), StrEq(METRIC_KEY(OPTIMIZATION_CAPABILITIES)), _))
.WillByDefault(Return(cpuCability));
ON_CALL(*core, GetMetric(StrEq(CommonTestUtils::DEVICE_GPU), StrEq(METRIC_KEY(OPTIMIZATION_CAPABILITIES)), _))
.WillByDefault(Return(gpuCability));
// Route ICore::LoadNetwork("CPU"/"GPU") to the corresponding mock exe network.
ON_CALL(*core,
LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
::testing::Matcher<const std::string&>(StrEq("CPU")),
::testing::Matcher<const std::map<std::string, std::string>&>(_)))
.WillByDefault(Return(cpuMockExeNetwork));
ON_CALL(*core,
LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
::testing::Matcher<const std::string&>(StrEq("GPU")),
::testing::Matcher<const std::map<std::string, std::string>&>(_)))
.WillByDefault(Return(gpuMockExeNetwork));
std::shared_ptr<ngraph::Function> simpleNetwork = ngraph::builder::subgraph::makeSingleConv();
ASSERT_NO_THROW(simpleCnnNetwork = InferenceEngine::CNNNetwork(simpleNetwork));
}
};
// Verifies that for each expected target device, the AUTO/MULTI plugin's
// LoadExeNetworkImpl forwards the parsed secondary properties (e.g. NUM_STREAMS)
// to ICore::LoadNetwork for exactly that device.
TEST_P(LoadNetworkWithSecondaryConfigsMockTest, LoadNetworkWithSecondaryConfigsTest) {
std::string device;
std::vector<std::string> targetDevices;
Config config;
std::tie(device, targetDevices, config) = this->GetParam();
// The plugin name decides whether the AUTO or the MULTI code path is taken.
if (device.find("AUTO") != std::string::npos)
plugin->SetName("AUTO");
if (device.find("MULTI") != std::string::npos)
plugin->SetName("MULTI");
for (auto& deviceName : targetDevices) {
auto item = config.find(deviceName);
Config deviceConfigs;
if (item != config.end()) {
std::stringstream strConfigs(item->second);
// Parse the device properties to common property into deviceConfigs.
ov::util::Read<Config>{}(strConfigs, deviceConfigs);
}
// Expect exactly one LoadNetwork call per target device whose config map
// contains (at least) the device-specific entries parsed above.
EXPECT_CALL(
*core,
LoadNetwork(::testing::Matcher<const InferenceEngine::CNNNetwork&>(_),
::testing::Matcher<const std::string&>(deviceName),
::testing::Matcher<const std::map<std::string, std::string>&>(MapContains(deviceConfigs))))
.Times(1);
}
ASSERT_NO_THROW(plugin->LoadExeNetworkImpl(simpleCnnNetwork, config));
}
// Instantiate the suite over all AUTO/MULTI device and secondary-property combinations.
INSTANTIATE_TEST_SUITE_P(smoke_AutoMock_LoadNetworkWithSecondaryConfigs,
LoadNetworkWithSecondaryConfigsMockTest,
::testing::ValuesIn(LoadNetworkWithSecondaryConfigsMockTest::CreateConfigs()),
LoadNetworkWithSecondaryConfigsMockTest::getTestCaseName);

View File

@ -19,4 +19,4 @@ public:
MOCK_METHOD((std::vector<DeviceInformation>), ParseMetaDevices,
(const std::string&, (const std::map<std::string, std::string>&)), (const, override));
};
}// namespace MockMultiDevice
}// namespace MockMultiDevice

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <iostream>
#include "ie_icore.hpp"
#include "plugin.hpp"
using namespace MockMultiDevicePlugin;
namespace MockMultiDevice {
// gmock double of MultiDeviceInferencePlugin exposing the device-listing,
// device-selection, and meta-device-parsing entry points so tests can intercept
// them or delegate to the real MultiDeviceInferencePlugin implementation.
class MockMultiPluginForLoadNetworkWithPropertiesTest : public MultiDeviceInferencePlugin {
public:
// Comma-separated candidate device list derived from the given config.
MOCK_METHOD((std::string), GetDeviceList, ((const std::map<std::string, std::string>&)), (const, override));
// Picks one device from the meta-device list for the given precision/priority.
MOCK_METHOD(DeviceInformation,
SelectDevice,
((const std::vector<DeviceInformation>&), const std::string&, unsigned int),
(override));
// Filters/orders the meta-devices that are valid for the network precision.
MOCK_METHOD((std::list<DeviceInformation>),
GetValidDevice,
((const std::vector<DeviceInformation>&), const std::string&),
(override));
// Parses a priority string plus config into concrete meta-device descriptors.
MOCK_METHOD((std::vector<DeviceInformation>),
ParseMetaDevices,
(const std::string&, (const std::map<std::string, std::string>&)),
(const, override));
};
} // namespace MockMultiDevice

@ -1 +1 @@
Subproject commit f8bd7cebfb3e6aeefb1d1d93e46a461471e15f57
Subproject commit d269d902e4c3cd02f3e731e1e2ff8307352817a4