Add debug statistics log for device inference counts (#9825)

* add statistics log

Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>

* change LOG_DEBUG to LOG_INFO

Signed-off-by: Hu, Yuan2 <yuan2.hu@intel.com>

* fix type

Signed-off-by: fishbell <bell.song@intel.com>

Co-authored-by: fishbell <bell.song@intel.com>
This commit is contained in:
Yuan Hu
2022-02-01 19:18:29 +08:00
committed by GitHub
parent f25c450534
commit 8892b7b327
3 changed files with 20 additions and 2 deletions

View File

@@ -79,6 +79,7 @@ MultiDeviceAsyncInferRequest::MultiDeviceAsyncInferRequest(
}
if (_needPerfCounters)
_perfMap = _workerInferRequest->_inferRequest->GetPerformanceCounts();
_workerInferRequest->_inferCount++;
}}
};
}

View File

@@ -160,6 +160,7 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const std::string&
, _context(context)
, _workModeIsAUTO(true)
, _network(network) {
LOG_INFO("[AUTOPLUGIN]ExecutableNetwork start");
if (_multiPlugin->GetCore() == nullptr) {
IE_THROW() << "Please, work with " << _multiPlugin->GetName() << " device via InferencEngine::Core object";
}
@@ -282,8 +283,10 @@ MultiDeviceExecutableNetwork::MultiDeviceExecutableNetwork(const std::string&
// second, check the idle queue if all requests are in place
size_t destroynum = 0;
std::pair<int, WorkerInferRequest*> worker;
while (_idleWorkerRequests["CPU_HELP"].try_pop(worker))
while (_idleWorkerRequests["CPU_HELP"].try_pop(worker)) {
destroynum++;
_cpuHelpInferCount += worker.second->_inferCount;
}
if (destroynum == _workerRequests["CPU_HELP"].size()) {
std::lock_guard<std::mutex> lock(_confMutex);
_workerRequests["CPU_HELP"].clear();
@@ -520,10 +523,22 @@ MultiDeviceExecutableNetwork::~MultiDeviceExecutableNetwork() {
// stop accepting any idle requests back (for re-scheduling)
idleWorker.second.set_capacity(0);
}
for (auto&& _workerRequest : _workerRequests) {
unsigned int count = 0;
for (auto& request : _workerRequest.second) {
count += request._inferCount;
}
if (_workerRequest.first == "CPU_HELP") {
LOG_INFO("[AUTOPLUGIN]CPU_HELP:infer:%ld", _cpuHelpInferCount + count);
} else {
LOG_INFO("[AUTOPLUGIN]%s:infer:%ld", _workerRequest.first.c_str(), count);
}
}
{
std::lock_guard<std::mutex> lock(_confMutex);
_workerRequests.clear();
}
LOG_INFO("[AUTOPLUGIN]ExecutableNetwork end");
}
std::shared_ptr<InferenceEngine::RemoteContext> MultiDeviceExecutableNetwork::GetContext() const {

View File

@@ -84,6 +84,7 @@ public:
InferenceEngine::SoIInferRequestInternal _inferRequest;
InferenceEngine::Task _task;
std::exception_ptr _exceptionPtr = nullptr;
unsigned int _inferCount = 0;
int _index = 0;
};
using NotBusyWorkerRequests = InferenceEngine::ThreadSafeBoundedPriorityQueue<std::pair<int, WorkerInferRequest*>>;
@@ -157,6 +158,7 @@ private:
mutable std::mutex _confMutex;
bool _exitFlag = {false};
const InferenceEngine::CNNNetwork _network;
int _cpuHelpInferCount = 0;
};
} // namespace MultiDevicePlugin
} // namespace MultiDevicePlugin