add tag for log (#11887)
* add tag for log
* cumulative log tag output AUTOPLUGIN
* add usage comment for LOG_XXX_TAG macros and use AUTO or MULTI as the log tag
This commit is contained in:
parent
c0a2c98a45
commit
44da3f06c4
@ -80,7 +80,7 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const {
|
||||
_autoSContext->_core->GetConfig(deviceInfo.deviceName,
|
||||
CONFIG_KEY(PERFORMANCE_HINT)).as<std::string>() == CONFIG_VALUE(THROUGHPUT);
|
||||
} catch (...) {
|
||||
LOG_DEBUG("[AUTOPLUGIN]GetMetric:%s for %s", "PERF_HINT config not supported",
|
||||
LOG_DEBUG_TAG("GetMetric:%s for %s", "PERF_HINT config not supported",
|
||||
deviceInfo.deviceName.c_str());
|
||||
}
|
||||
const auto& mode = deviceInfo.config.find(CONFIG_KEY(PERFORMANCE_HINT));
|
||||
@ -96,16 +96,16 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const {
|
||||
options).as<std::tuple<unsigned int, unsigned int>>();
|
||||
upperBoundStreamsNum = std::get<1>(rangeOfStreams);
|
||||
} catch (const IE::Exception& iie) {
|
||||
LOG_DEBUG("[AUTOPLUGIN] GetMetric RANGE_FOR_STREAMS failed");
|
||||
LOG_DEBUG_TAG("GetMetric RANGE_FOR_STREAMS failed");
|
||||
}
|
||||
if (!_autoSContext->_batchingDisabled) {
|
||||
try {
|
||||
optimalBatchSize = _autoSContext->_core->GetMetric(deviceInfo.deviceName,
|
||||
METRIC_KEY(OPTIMAL_BATCH_SIZE), options).as<unsigned int>();
|
||||
LOG_DEBUG("[AUTOPLUGIN]BATCHING:%s:%ld", "optimal batch size",
|
||||
LOG_DEBUG_TAG("BATCHING:%s:%ld", "optimal batch size",
|
||||
optimalBatchSize);
|
||||
} catch (...) {
|
||||
LOG_DEBUG("[AUTOPLUGIN]BATCHING:%s", "metric OPTIMAL_BATCH_SIZE not supported");
|
||||
LOG_DEBUG_TAG("BATCHING:%s", "metric OPTIMAL_BATCH_SIZE not supported");
|
||||
}
|
||||
}
|
||||
if (optimalBatchSize > 1) {
|
||||
@ -123,13 +123,13 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const {
|
||||
requests = static_cast<unsigned int>
|
||||
(IE::PerfHintsConfig::CheckPerformanceHintRequestValue(reqs->second));
|
||||
}
|
||||
LOG_DEBUG("[AUTOPLUGIN]BATCHING:%s:%ld", "user requested size", requests);
|
||||
LOG_DEBUG_TAG("BATCHING:%s:%ld", "user requested size", requests);
|
||||
if (!requests) { // no limitations from user
|
||||
requests = optimalBatchSize * upperBoundStreamsNum * 2;
|
||||
LOG_DEBUG("[AUTOPLUGIN]BATCHING:%s:%ld", "deduced size:", requests);
|
||||
LOG_DEBUG_TAG("BATCHING:%s:%ld", "deduced size:", requests);
|
||||
}
|
||||
} catch (const IE::Exception& iie) {
|
||||
LOG_WARNING("[AUTOPLUGIN]deduce optimal infer requset num for auto-batch failed :%s",
|
||||
LOG_WARNING_TAG("deduce optimal infer requset num for auto-batch failed :%s",
|
||||
iie.what());
|
||||
}
|
||||
real = (std::max)(requests, optimalBatchSize);
|
||||
|
@ -99,7 +99,8 @@ void AutoSchedule::GenerateWorkers(const std::string& device,
|
||||
}
|
||||
|
||||
void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
LOG_INFO("[AUTOPLUGIN]ExecutableNetwork start");
|
||||
_LogTag = sContext->_LogTag;
|
||||
LOG_INFO_TAG("ExecutableNetwork start");
|
||||
// initialize cpuHelpReleasetime
|
||||
_cpuHelpReleaseTime = std::chrono::steady_clock::now();
|
||||
_multiSContext = std::dynamic_pointer_cast<MultiScheduleContext>(sContext);
|
||||
@ -147,7 +148,7 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
// remove CPU from default candidate list for Cumulative Throughput mode
|
||||
if (GPUNums >= 2 && CPUNums > 0) {
|
||||
validDevices.erase(itCPUDevice);
|
||||
LOG_INFO("[AUTOPLUGIN]:GPUNums:%d, remove CPU from default candidate list for "
|
||||
LOG_INFO_TAG("GPUNums:%d, remove CPU from default candidate list for "
|
||||
"CUMULATIVE_THROUGHPUT",
|
||||
GPUNums);
|
||||
}
|
||||
@ -160,7 +161,8 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
}
|
||||
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.deviceName = deviceName;
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.config[CONFIG_KEY(PERFORMANCE_HINT)] = InferenceEngine::PluginConfigParams::THROUGHPUT;
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.config[CONFIG_KEY(PERFORMANCE_HINT)] =
|
||||
InferenceEngine::PluginConfigParams::CUMULATIVE_THROUGHPUT;
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.config[CONFIG_KEY(PERF_COUNT)] =
|
||||
_autoSContext->_needPerfCounters ? InferenceEngine::PluginConfigParams::YES
|
||||
: InferenceEngine::PluginConfigParams::NO;
|
||||
@ -171,7 +173,7 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
_loadContext[ACTUALDEVICE].networkPrecision,
|
||||
_autoSContext->_modelPriority);
|
||||
}
|
||||
LOG_INFO("[AUTOPLUGIN]:select device:%s", _loadContext[ACTUALDEVICE].deviceInfo.deviceName.c_str());
|
||||
LOG_INFO_TAG("select device:%s", _loadContext[ACTUALDEVICE].deviceInfo.deviceName.c_str());
|
||||
bool isActualDevCPU =
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.deviceName.find("CPU") !=std::string::npos;
|
||||
// if Actual device is CPU, disabled _loadContext[CPU], only use _loadContext[ACTUALDEVICE]
|
||||
@ -187,7 +189,7 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
_loadContext[CPU].deviceInfo.config[CONFIG_KEY(PERFORMANCE_HINT)] =
|
||||
IE::PluginConfigParams::LATENCY;
|
||||
_loadContext[CPU].workName = "CPU_HELP";
|
||||
LOG_INFO("[AUTOPLUGIN]:will load CPU for accelerator");
|
||||
LOG_INFO_TAG("will load CPU for accelerator");
|
||||
} else {
|
||||
_loadContext[CPU].isEnabled = false;
|
||||
}
|
||||
@ -214,7 +216,7 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
}
|
||||
contextPtr->isAlready = true;
|
||||
auto& deviceName = contextPtr->deviceInfo.deviceName;
|
||||
LOG_INFO("[AUTOPLUGIN]:device:%s loading Network finished", deviceName.c_str());
|
||||
LOG_INFO_TAG("device:%s loading Network finished", deviceName.c_str());
|
||||
if (!isCumulative) {
|
||||
auto supported_config_keys =
|
||||
_autoSContext->_core->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))
|
||||
@ -223,8 +225,8 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
std::lock_guard<std::mutex> lock(_autoSContext->_confMutex);
|
||||
for (const auto& cfg : supported_config_keys) {
|
||||
try {
|
||||
LOG_DEBUG(
|
||||
"[AUTOPLUGIN]:device:%s, GetConfig:%s=%s",
|
||||
LOG_DEBUG_TAG(
|
||||
"device:%s, GetConfig:%s=%s",
|
||||
deviceName.c_str(),
|
||||
cfg.c_str(),
|
||||
contextPtr->executableNetwork->GetConfig(cfg).as<std::string>().c_str());
|
||||
@ -307,11 +309,11 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
_cpuHelpFps = cpuHelpAllStartTimes.size() * 1000 / durtation.count();
|
||||
}
|
||||
});
|
||||
LOG_INFO("[AUTOPLUGIN] release all work requests of CPU_HELP");
|
||||
LOG_INFO_TAG("release all work requests of CPU_HELP");
|
||||
_workerRequests["CPU_HELP"].clear();
|
||||
_loadContext[CPU].executableNetwork._ptr.reset();
|
||||
_loadContext[CPU].executableNetwork._so.reset();
|
||||
LOG_INFO("[AUTOPLUGIN]:helper released!!");
|
||||
LOG_INFO_TAG("helper released!!");
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -340,17 +342,17 @@ void AutoSchedule::TryToLoadNetWork(AutoLoadContext& context, const std::string&
|
||||
try {
|
||||
maxNumThreads = _autoSContext->_core->GetConfig(device, GPU_CONFIG_KEY(MAX_NUM_THREADS)).as<int>();
|
||||
} catch (...) {
|
||||
LOG_DEBUG("[AUTOPLUGIN]: cannot get MAX_NUM_THREADS from GPU");
|
||||
LOG_DEBUG_TAG("cannot get MAX_NUM_THREADS from GPU");
|
||||
}
|
||||
if (maxNumThreads == static_cast<int>(std::thread::hardware_concurrency())) {
|
||||
int threadNum = maxNumThreads / 2;
|
||||
deviceConfig[GPU_CONFIG_KEY(MAX_NUM_THREADS)] = std::to_string(threadNum).c_str();
|
||||
LOG_DEBUG("[AUTO PLUGIN]:gpu streams number for compiling: %s",
|
||||
LOG_DEBUG_TAG("gpu streams number for compiling: %s",
|
||||
deviceConfig[GPU_CONFIG_KEY(MAX_NUM_THREADS)].c_str());
|
||||
} else {
|
||||
// user set the compiling threads num
|
||||
// use the user's val anyway
|
||||
LOG_DEBUG("[AUTOPLUGIN]:user defined compiling threads: %d", maxNumThreads);
|
||||
LOG_DEBUG_TAG("user defined compiling threads: %d", maxNumThreads);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -417,7 +419,7 @@ void AutoSchedule::TryToLoadNetWork(AutoLoadContext& context, const std::string&
|
||||
return;
|
||||
}
|
||||
}
|
||||
LOG_DEBUG("[AUTOPLUGIN] try to load %s", context.deviceInfo.deviceName.c_str());
|
||||
LOG_DEBUG_TAG("try to load %s", context.deviceInfo.deviceName.c_str());
|
||||
// try to load this candidate device
|
||||
TryToLoadNetWork(context, modelPath, network);
|
||||
}
|
||||
@ -446,10 +448,10 @@ void AutoSchedule::WaitFirstNetworkReady() {
|
||||
//print errMessage
|
||||
for (int i = CONTEXTNUM - 1; i >= 0; i--) {
|
||||
if (_loadContext[i].isEnabled) {
|
||||
LOG_ERROR("[AUTOPLUGIN] load failed, %s", _loadContext[i].errMessage.c_str());
|
||||
LOG_ERROR_TAG("load failed, %s", _loadContext[i].errMessage.c_str());
|
||||
}
|
||||
}
|
||||
IE_THROW() << "[AUTOPLUGIN] load all devices failed";
|
||||
IE_THROW() << GetLogTag() << "load all devices failed";
|
||||
}
|
||||
|
||||
void AutoSchedule::WaitActualNetworkReady() const {
|
||||
@ -535,7 +537,7 @@ AutoSchedule::~AutoSchedule() {
|
||||
_autoSContext->_plugin->UnregisterPriority(_autoSContext->_modelPriority,
|
||||
_loadContext[ACTUALDEVICE].deviceInfo.uniqueName);
|
||||
|
||||
LOG_INFO("[AUTOPLUGIN]ExecutableNetwork end");
|
||||
LOG_INFO_TAG("ExecutableNetwork end");
|
||||
}
|
||||
|
||||
IInferPtr AutoSchedule::CreateInferRequest() {
|
||||
|
@ -108,6 +108,7 @@ public:
|
||||
using Ptr = std::shared_ptr<ScheduleContext>;
|
||||
std::shared_ptr<IE::ICore> _core;
|
||||
std::weak_ptr<IExecNetwork> _executableNetwork;
|
||||
std::string _LogTag;
|
||||
virtual ~ScheduleContext() = default;
|
||||
};
|
||||
|
||||
|
@ -42,4 +42,8 @@ void ExecutableNetwork::SetExeNetworkForContext() {
|
||||
_sContext->_executableNetwork = shared_from_this();
|
||||
});
|
||||
}
|
||||
|
||||
std::string ExecutableNetwork::GetLogTag() const noexcept {
|
||||
return _sContext->_LogTag;
|
||||
}
|
||||
} // namespace MultiDevicePlugin
|
||||
|
@ -26,6 +26,8 @@ public:
|
||||
const std::vector<std::shared_ptr<const ov::Node>>& outputs) override;
|
||||
~ExecutableNetwork() override;
|
||||
|
||||
protected:
|
||||
std::string GetLogTag() const noexcept;
|
||||
private:
|
||||
Schedule::Ptr _schedule;
|
||||
ScheduleContext::Ptr _sContext;
|
||||
|
@ -16,6 +16,7 @@ thread_local const char* MultiSchedule::_thisPreferredDeviceName = "";
|
||||
|
||||
void MultiSchedule::init(const ScheduleContext::Ptr& sContext) {
|
||||
_cpuHelpReleaseTime = std::chrono::steady_clock::now();
|
||||
_LogTag = sContext->_LogTag;
|
||||
_multiSContext = std::dynamic_pointer_cast<MultiScheduleContext>(sContext);
|
||||
for (auto&& networkValue : _multiSContext->_networksPerDevice) {
|
||||
auto& device = networkValue.first;
|
||||
@ -241,16 +242,16 @@ MultiSchedule::~MultiSchedule() {
|
||||
reqAllStartTimes.sort(std::less<Time>());
|
||||
reqAllEndTimes.sort(std::less<Time>());
|
||||
if (_workerRequest.first == "CPU_HELP") {
|
||||
LOG_INFO("[AUTOPLUGIN]CPU_HELP:infer:%ld", _cpuHelpInferCount + count);
|
||||
LOG_INFO_TAG("CPU_HELP:infer:%ld", _cpuHelpInferCount + count);
|
||||
if (_cpuHelpFps > 0.0) {
|
||||
LOG_INFO("[AUTOPLUGIN]CPU_HELP:fps:%lf", _cpuHelpFps);
|
||||
LOG_INFO_TAG("CPU_HELP:fps:%lf", _cpuHelpFps);
|
||||
} else if (count >= 1) {
|
||||
std::chrono::duration<double, std::milli> durtation =
|
||||
reqAllEndTimes.back() - reqAllStartTimes.front();
|
||||
LOG_INFO("[AUTOPLUGIN]CPU_HELP:fps:%lf", count * 1000 / durtation.count());
|
||||
LOG_INFO_TAG("CPU_HELP:fps:%lf", count * 1000 / durtation.count());
|
||||
}
|
||||
} else {
|
||||
LOG_INFO("[AUTOPLUGIN]%s:infer:%ld", _workerRequest.first.c_str(), count);
|
||||
LOG_INFO_TAG("%s:infer:%ld", _workerRequest.first.c_str(), count);
|
||||
auto n = reqAllStartTimes.size();
|
||||
Time time;
|
||||
while (!reqAllStartTimes.empty()) {
|
||||
@ -265,7 +266,7 @@ MultiSchedule::~MultiSchedule() {
|
||||
if (n >= 1) {
|
||||
std::chrono::duration<double, std::milli> durtation =
|
||||
reqAllEndTimes.back() - time;
|
||||
LOG_INFO("[AUTOPLUGIN]%s:fps:%lf", _workerRequest.first.c_str(),
|
||||
LOG_INFO_TAG("%s:fps:%lf", _workerRequest.first.c_str(),
|
||||
n * 1000 / durtation.count());
|
||||
}
|
||||
}
|
||||
@ -306,6 +307,8 @@ IInferPtr MultiSchedule::CreateInferRequest() {
|
||||
syncRequestImpl,
|
||||
execNetwork->_callbackExecutor);
|
||||
}
|
||||
|
||||
std::string MultiSchedule::GetLogTag() const noexcept {
|
||||
return _LogTag;
|
||||
}
|
||||
} // namespace MultiDevicePlugin
|
||||
|
||||
|
@ -47,6 +47,7 @@ protected:
|
||||
virtual void GenerateWorkers(const std::string& device, const IE::SoExecutableNetworkInternal& executableNetwork);
|
||||
static bool RunPipelineTask(IE::Task& inferPipelineTask, NotBusyWorkerRequests& idleWorkerRequests, const DeviceName& preferred_device);
|
||||
virtual bool ScheduleToWorkerInferRequest(IE::Task, DeviceName preferred_device = "");
|
||||
std::string GetLogTag() const noexcept;
|
||||
|
||||
protected:
|
||||
IE::ThreadSafeQueue<IE::Task> _inferPipelineTasks;
|
||||
@ -60,6 +61,7 @@ protected:
|
||||
Time _cpuHelpReleaseTime;
|
||||
unsigned int _cpuHelpInferCount = 0;
|
||||
double _cpuHelpFps = 0.0;
|
||||
std::string _LogTag;
|
||||
};
|
||||
|
||||
} // namespace MultiDevicePlugin
|
||||
|
@ -203,7 +203,7 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::ParseMetaDevices(cons
|
||||
uniqueName = fullDeviceName + "_" + defaultDeviceID;
|
||||
}
|
||||
|
||||
LOG_DEBUG("[AUTOPLUGIN]:deviceNameWithID:%s, defaultDeviceID:%s, uniqueName:%s",
|
||||
LOG_DEBUG_TAG("deviceNameWithID:%s, defaultDeviceID:%s, uniqueName:%s",
|
||||
deviceNameWithID.c_str(), defaultDeviceID.c_str(), uniqueName.c_str());
|
||||
// create meta device
|
||||
metaDevices.push_back({deviceNameWithID, getDeviceConfig(deviceNameWithID), numRequests, defaultDeviceID, uniqueName, devicePriority});
|
||||
@ -332,7 +332,8 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
if (modelPath.empty() && network.getFunction() == nullptr) {
|
||||
IE_THROW() << GetName() << " device supports just ngraph network representation";
|
||||
}
|
||||
|
||||
// to use plugin's name as the log tag
|
||||
_LogTag = GetName();
|
||||
auto fullConfig = mergeConfigs(_config, config);
|
||||
// collect the settings that are applicable to the devices we are loading the network to
|
||||
std::unordered_map<std::string, InferenceEngine::Parameter> multiNetworkConfig;
|
||||
@ -362,7 +363,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
std::string clonedModelPath = modelPath;
|
||||
if (modelPath.empty()) {
|
||||
// if network is valid
|
||||
LOG_INFO("[AUTOPLUGIN]:load with CNN network");
|
||||
LOG_INFO_TAG("load with CNN network");
|
||||
supportDevices = FilterDeviceByNetwork(supportDevicesByConfig, network);
|
||||
// clone the network, in case of reshape conflict
|
||||
clonedNetwork = InferenceEngine::details::cloneNetwork(network);
|
||||
@ -372,9 +373,9 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
clonedNetwork = GetCore()->ReadNetwork(modelPath, std::string());
|
||||
// do we really need to disable model path?
|
||||
clonedModelPath = "";
|
||||
LOG_INFO("[AUTOPLUGIN]:load with CNN network");
|
||||
LOG_INFO_TAG("load with CNN network");
|
||||
} else {
|
||||
LOG_INFO("[AUTOPLUGIN]:load with model path");
|
||||
LOG_INFO_TAG("load with model path");
|
||||
}
|
||||
}
|
||||
// replace the configure with configure that auto want to pass to device
|
||||
@ -389,7 +390,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
for (auto& config : configs) {
|
||||
if (std::find(validConfigKey.begin(), validConfigKey.end(), config.first) != validConfigKey.end()) {
|
||||
deviceConfig.insert({config.first, config.second});
|
||||
LOG_INFO("[AUTOPLUGIN]:device:%s, config:%s=%s", iter->deviceName.c_str(),
|
||||
LOG_INFO_TAG("device:%s, config:%s=%s", iter->deviceName.c_str(),
|
||||
config.first.c_str(), config.second.c_str());
|
||||
}
|
||||
}
|
||||
@ -401,7 +402,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
iter->config = deviceConfig;
|
||||
strDevices += iter->deviceName;
|
||||
strDevices += ((iter + 1) == supportDevices.end()) ? "" : ",";
|
||||
LOG_INFO("[AUTOPLUGIN]:device:%s, priority:%ld", iter->deviceName.c_str(), iter->devicePriority);
|
||||
LOG_INFO_TAG("device:%s, priority:%ld", iter->deviceName.c_str(), iter->devicePriority);
|
||||
}
|
||||
autoSContext->_modelPath = clonedModelPath;
|
||||
// clone the network, in case of reshape conflict
|
||||
@ -411,12 +412,23 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
autoSContext->_strDevices = strDevices;
|
||||
autoSContext->_plugin = this;
|
||||
autoSContext->_core = GetCore();
|
||||
autoSContext->_LogTag = _LogTag;
|
||||
auto tmpiter = fullConfig.find(ov::intel_auto::device_bind_buffer.name());
|
||||
if (tmpiter != fullConfig.end() && tmpiter->second == PluginConfigParams::YES)
|
||||
autoSContext->_bindBuffer = true;
|
||||
return std::make_shared<AutoExecutableNetwork>(autoSContext, std::make_shared<AutoSchedule>());
|
||||
}
|
||||
OV_ITT_SCOPED_TASK(itt::domains::MULTIPlugin, "MultiDeviceInferencePlugin::LoadNetworkImpl:MultiMode");
|
||||
// if is cumulative, PERFORMANCE_HINT set to THROUGHPUT and _LogTag set to AUTO
|
||||
auto configIter =
|
||||
std::find_if(fullConfig.begin(), fullConfig.end(), [](const std::pair<std::string, std::string>& config) {
|
||||
return (config.first == CONFIG_KEY(PERFORMANCE_HINT));
|
||||
});
|
||||
if (configIter != fullConfig.end() && configIter->second == InferenceEngine::PluginConfigParams::CUMULATIVE_THROUGHPUT) {
|
||||
configIter->second = InferenceEngine::PluginConfigParams::THROUGHPUT;
|
||||
_LogTag = "AUTO";
|
||||
LOG_INFO_TAG("CUMULATIVE Call MULTI PERFORMACE_HINT set to THROUGHPUT");
|
||||
}
|
||||
if (priorities == fullConfig.end()) {
|
||||
IE_THROW() << "KEY_MULTI_DEVICE_PRIORITIES key is not set for " << GetName() << " device";
|
||||
} else { // for use case -d MULTI:xPU or -d AUTO:xPU
|
||||
@ -480,6 +492,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
|
||||
multiSContext->_config = multiNetworkConfig;
|
||||
multiSContext->_needPerfCounters = enablePerfCounters;
|
||||
multiSContext->_core = GetCore();
|
||||
multiSContext->_LogTag = _LogTag;
|
||||
IExecutableNetworkInternal::Ptr impl;
|
||||
auto tmpiter = fullConfig.find(ov::intel_auto::device_bind_buffer.name());
|
||||
if (tmpiter != fullConfig.end() && tmpiter->second == PluginConfigParams::YES)
|
||||
@ -753,7 +766,7 @@ std::string MultiDeviceInferencePlugin::GetDeviceList(const std::map<std::string
|
||||
|
||||
deviceVec = deviceVec.size() == 0 ? deviceList : mergeDeviceList();
|
||||
for (auto& iter : devicesToBeDeleted) {
|
||||
LOG_INFO("[AUTOPLUGIN]:remove %s from device candidate list", iter.c_str());
|
||||
LOG_INFO_TAG("remove %s from device candidate list", iter.c_str());
|
||||
updateDeviceVec(iter);
|
||||
}
|
||||
for (auto&& device : deviceVec) {
|
||||
@ -905,7 +918,7 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::FilterDeviceByNetwork
|
||||
for (auto& op : model->get_ops()) {
|
||||
if (std::dynamic_pointer_cast<ngraph::op::AssignBase>(op) ||
|
||||
std::dynamic_pointer_cast<ngraph::op::ReadValueBase>(op)) {
|
||||
LOG_INFO("[AUTOPLUGIN]:stateful mode, try deployed to CPU");
|
||||
LOG_INFO_TAG("stateful mode, try deployed to CPU");
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -924,4 +937,7 @@ std::vector<DeviceInformation> MultiDeviceInferencePlugin::FilterDeviceByNetwork
|
||||
}
|
||||
return metaDevices;
|
||||
}
|
||||
std::string MultiDeviceInferencePlugin::GetLogTag() const noexcept {
|
||||
return _LogTag;
|
||||
}
|
||||
} // namespace MultiDevicePlugin
|
||||
|
@ -72,8 +72,10 @@ private:
|
||||
const std::map<std::string, std::string>& config);
|
||||
std::vector<DeviceInformation> FilterDeviceByNetwork(const std::vector<DeviceInformation>& metaDevices,
|
||||
InferenceEngine::CNNNetwork network);
|
||||
std::string GetLogTag() const noexcept;
|
||||
static std::mutex _mtx;
|
||||
static std::map<unsigned int, std::list<std::string>> _priorityMap;
|
||||
std::string _LogTag;
|
||||
static std::set<std::string> _availableDevices;
|
||||
};
|
||||
|
||||
|
@ -33,6 +33,12 @@
|
||||
#define LOG_WARNING(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::WARN, "WARN", nullptr, __VA_ARGS__)
|
||||
#define LOG_ERROR(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::ERROR, "ERROR", nullptr, __VA_ARGS__)
|
||||
|
||||
// To use macro LOG_XXX_TAG, need to implement GetLogTag() which returns log tag, the type of log tag is string
|
||||
#define LOG_DEBUG_TAG(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::DEBUG, "DEBUG", GetLogTag().c_str(), __VA_ARGS__)
|
||||
#define LOG_INFO_TAG(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::INFO, "INFO", GetLogTag().c_str(), __VA_ARGS__)
|
||||
#define LOG_WARNING_TAG(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::WARN, "WARN", GetLogTag().c_str(), __VA_ARGS__)
|
||||
#define LOG_ERROR_TAG(...) HLogPrint(true, false, MultiDevicePlugin::LogLevel::ERROR, "ERROR", GetLogTag().c_str(), __VA_ARGS__)
|
||||
|
||||
#define TraceCallStacks(...) HLogPrint(true, true, MultiDevicePlugin::LogLevel::DEBUG, "DEBUG", nullptr, __VA_ARGS__)
|
||||
#define TraceCallStack() TraceCallStacks(" ")
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user