From be588400e522917a377e9aa431d292d230f8b319 Mon Sep 17 00:00:00 2001
From: myshevts
Date: Thu, 2 Dec 2021 17:05:10 +0300
Subject: [PATCH] revert the timeout back to avoid mixing the studies, fixed
 the footprint calc

---
 inference-engine/src/auto_batch/auto_batch.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/inference-engine/src/auto_batch/auto_batch.cpp b/inference-engine/src/auto_batch/auto_batch.cpp
index 84f305b343b..3946e8fa739 100644
--- a/inference-engine/src/auto_batch/auto_batch.cpp
+++ b/inference-engine/src/auto_batch/auto_batch.cpp
@@ -288,7 +288,7 @@ InferenceEngine::IInferRequestInternal::Ptr AutoBatchExecutableNetwork::CreateIn
         workerRequestPtr->_thread = std::thread([workerRequestPtr, this] {
             while (1) {
                 std::unique_lock<std::mutex> lock(workerRequestPtr->_mutex);
-                auto status = workerRequestPtr->_cond.wait_for(lock, std::chrono::milliseconds(100));
+                auto status = workerRequestPtr->_cond.wait_for(lock, std::chrono::milliseconds(1000));
                 // as we pop the tasks from the queue only here
                 // it is ok to call unsafe_size (as the _tasks can only grow in parallel)
                 const int sz = workerRequestPtr->_tasks.unsafe_size();
@@ -554,7 +554,7 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
         const auto stats = pCore->GetMetric(device, GPU_METRIC_KEY(MEMORY_STATISTICS)).as<std::map<std::string, uint64_t>>();
         for (auto s : stats)
             footprint += s.second;
-        std::cout << "!!!!!!!!!!!!!! (FOOTPRINT) " << message << " : " << footprint/1024 << " MB" << std::endl;
+        std::cout << "!!!!!!!!!!!!!! (FOOTPRINT) " << message << " : " << footprint/1024/1024 << " MB" << std::endl;
         return footprint;
     };
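
Note: for reference, below is a minimal standalone sketch (not the plugin code; the worker loop and the bytes_to_mb helper are illustrative stand-ins) of the two patterns the patch touches: a worker thread that waits on a condition variable with a 1000 ms timeout before draining its task queue, and the bytes-to-MB conversion, which needs two divisions by 1024 as the second hunk fixes.

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Illustrative helper: dividing a byte count by 1024 once yields KB;
// the patch's second hunk divides twice so the log really prints MB.
static uint64_t bytes_to_mb(uint64_t bytes) { return bytes / 1024 / 1024; }

int main() {
    std::mutex m;
    std::condition_variable cv;
    std::queue<int> tasks;  // stand-in for the plugin's task queue
    bool done = false;

    // Worker mirroring the patched loop: wake on a signal or after a
    // 1000 ms timeout, then drain whatever tasks have accumulated.
    std::thread worker([&] {
        while (true) {
            std::unique_lock<std::mutex> lock(m);
            cv.wait_for(lock, std::chrono::milliseconds(1000));
            while (!tasks.empty()) {
                std::cout << "task " << tasks.front() << std::endl;
                tasks.pop();
            }
            if (done)
                break;
        }
    });

    {
        std::lock_guard<std::mutex> guard(m);
        tasks.push(42);
        done = true;
    }
    cv.notify_one();
    worker.join();

    std::cout << bytes_to_mb(3221225472ull) << " MB" << std::endl;  // prints 3072
    return 0;
}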