removed unused var and fixed mixup from code shuffling (#10492)

Maxim Shevtsov
2022-02-18 10:05:23 +03:00
committed by GitHub
parent 2ac15eae3d
commit dcd6e3e961

@@ -827,7 +827,6 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
     if (!metaDevice.batchForDevice) {
         unsigned int requests = 0;
-        unsigned int optimalBatchSize = 0;
         // batch size is not set explicitly via device name e.g. BATCH:GPU(4)
         // let's query the optimal batch size
         std::map<std::string, InferenceEngine::Parameter> options;
@@ -839,7 +838,7 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
         if (reqs != config.end())
             requests = static_cast<unsigned int>(PerfHintsConfig::CheckPerformanceHintRequestValue(reqs->second));
         if (requests)
-            optBatchSize = std::max(1u, std::min(requests, optimalBatchSize));
+            optBatchSize = std::max(1u, std::min(requests, optBatchSize));
         if (optBatchSize > 2)  // batching is usually inefficient for batch<4 (as batch1 kernels are heavily optimized)
             metaDevice.batchForDevice = optBatchSize;
         else
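
For context: after the earlier code shuffle, the queried optimal batch size was stored in optBatchSize, while the leftover declaration optimalBatchSize was never assigned and stayed 0. std::min(requests, optimalBatchSize) therefore always returned 0, and the std::max(1u, ...) clamp forced the result to 1 whenever a performance-hint request count was set, silently disabling auto-batching. A minimal standalone C++ sketch of that arithmetic (variable names follow the diff; the concrete values are illustrative assumptions, not queried from a device):

#include <algorithm>
#include <iostream>

int main() {
    // Illustrative values (assumptions): a performance hint asking for 8
    // requests and a device reporting an optimal batch size of 16.
    unsigned int requests = 8;
    unsigned int optBatchSize = 16;

    // Before the fix: the stale variable was declared but never assigned,
    // so min() always saw 0 and max() clamped the result to 1.
    unsigned int optimalBatchSize = 0;
    unsigned int before = std::max(1u, std::min(requests, optimalBatchSize));

    // After the fix: the queried batch size is capped by the request count.
    unsigned int after = std::max(1u, std::min(requests, optBatchSize));

    std::cout << "before: " << before << ", after: " << after << std::endl;
    // Prints: before: 1, after: 8
}

With the fix, the subsequent optBatchSize > 2 check sees the actual queried value, so batching is no longer disabled unconditionally.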