Bell/use cpu for dynamic models (#17149)

* clean up multi code path

Signed-off-by: fishbell <bell.song@intel.com>

* clang

Signed-off-by: fishbell <bell.song@intel.com>

* potential locking issue

Signed-off-by: fishbell <bell.song@intel.com>

* remove unnecessary variable

Signed-off-by: fishbell <bell.song@intel.com>

* clear redundant return syntax

Signed-off-by: fishbell <bell.song@intel.com>

* still use cpu for dynamic models

Signed-off-by: fishbell <bell.song@intel.com>

* merge master

Signed-off-by: fishbell <bell.song@intel.com>

---------

Signed-off-by: fishbell <bell.song@intel.com>
This commit is contained in:
yanlan song 2023-04-25 05:01:11 +08:00 committed by GitHub
parent 5c21dcec4d
commit 64b5a4595a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 4 additions and 24 deletions

View File

@ -821,23 +821,6 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::FilterDeviceByNetwork
return false; return false;
}; };
auto isOutputDynamic = [&]() {
for (size_t i = 0; i < model->inputs().size() ; i++) {
if (model->input(i).get_partial_shape().is_dynamic()) {
// any input is dynamic
return false;
}
}
for (size_t i = 0; i < model->outputs().size() ; i++) {
if (model->output(i).get_partial_shape().is_dynamic()) {
// any output is dynamic
LOG_INFO_TAG("dynamic output model");
return true;
}
}
return false;
};
// Check if CPU is in candidate list // Check if CPU is in candidate list
auto cpuiter = std::find_if(metaDevices.begin(), metaDevices.end(), [](const DeviceInformation& deviceInfo) { auto cpuiter = std::find_if(metaDevices.begin(), metaDevices.end(), [](const DeviceInformation& deviceInfo) {
return deviceInfo.deviceName.find("CPU") != std::string::npos; return deviceInfo.deviceName.find("CPU") != std::string::npos;
@ -846,7 +829,7 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::FilterDeviceByNetwork
// If CPU is in candidate list, load dynamic network to CPU first // If CPU is in candidate list, load dynamic network to CPU first
// For MULTI do not only load stateful network to CPU // For MULTI do not only load stateful network to CPU
// For AUTO CTPUT only load stateful network to CPU // For AUTO CTPUT only load stateful network to CPU
if (((model->is_dynamic() && !isOutputDynamic()) || (isStateful() && _LogTag != "MULTI")) && cpuiter != metaDevices.end()) { if (((model->is_dynamic()) || (isStateful() && _LogTag != "MULTI")) && cpuiter != metaDevices.end()) {
filterDevice.push_back(*cpuiter); filterDevice.push_back(*cpuiter);
return filterDevice; return filterDevice;
} }

View File

@ -86,18 +86,15 @@ TEST_P(DynamicOutputInferenceTest, CanSelectCorrectTargetDeviceandInitizeBlobWit
ASSERT_NO_THROW(exeNetwork = plugin->LoadNetwork(cnnNet, config)); ASSERT_NO_THROW(exeNetwork = plugin->LoadNetwork(cnnNet, config));
std::shared_ptr<InferenceEngine::IInferRequestInternal> auto_request; std::shared_ptr<InferenceEngine::IInferRequestInternal> auto_request;
ASSERT_NO_THROW(auto_request = exeNetwork->CreateInferRequest()); ASSERT_NO_THROW(auto_request = exeNetwork->CreateInferRequest());
for (auto & iter : exeNetwork->GetOutputsInfo()) {
auto outBlob = auto_request->GetBlob(iter.first);
ASSERT_NE(outBlob->size(), 0);
}
ASSERT_NO_THROW(auto_request->StartAsync()); ASSERT_NO_THROW(auto_request->StartAsync());
ASSERT_NO_THROW(auto_request->Wait(0));
} }
const std::vector<DynamicOutputConfigParams> testConfigs = { const std::vector<DynamicOutputConfigParams> testConfigs = {
DynamicOutputConfigParams {false, "CPU,GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, DynamicOutputConfigParams {false, "CPU,GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)},
std::vector<std::string>{"CPU", "GPU"}}, std::vector<std::string>{"CPU"}},
DynamicOutputConfigParams {true, "CPU,GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)}, DynamicOutputConfigParams {true, "CPU,GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)},
std::vector<std::string>{"CPU", "GPU"}}, std::vector<std::string>{"CPU"}},
}; };
INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, DynamicOutputInferenceTest, INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, DynamicOutputInferenceTest,