Added a check for the availability of GNA HW in QueueInference (#18549)
This commit is contained in:
committed by
GitHub
parent
7bceba1523
commit
072b9f1e2a
@@ -583,5 +583,10 @@ uint32_t GNADeviceHelper::retrieveMaxLayersCount() {
|
||||
return Limitations::kMaxLayersCountGNA3_X;
|
||||
}
|
||||
}
|
||||
|
||||
bool GNADeviceHelper::isHwAvailable() {
|
||||
return target->get_detected_device_version() != DeviceVersion::SoftwareEmulation &&
|
||||
target->get_detected_device_version() != DeviceVersion::NotSet;
|
||||
}
|
||||
} // namespace intel_gna
|
||||
} // namespace ov
|
||||
|
||||
@@ -123,6 +123,7 @@ public:
|
||||
void getGnaPerfCounters(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& retPerfCounters);
|
||||
static std::string GetGnaLibraryVersion();
|
||||
|
||||
bool isHwAvailable();
|
||||
const GnaAllocations& getAllAllocations() const {
|
||||
return allAllocations;
|
||||
}
|
||||
|
||||
@@ -796,6 +796,10 @@ void GNAPlugin::DumpXNNToFile() const {
|
||||
}
|
||||
|
||||
uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap& inputs, InferenceEngine::BlobMap& result) {
|
||||
if (config.GetParameter(ov::intel_gna::execution_mode.name()).as<std::string>() == "GNA_HW" &&
|
||||
!gnadevice->isHwAvailable()) {
|
||||
THROW_GNA_EXCEPTION << "Execution mode GNA_HW is set, but hardware acceleration is unavailable";
|
||||
}
|
||||
auto freeWorker = requestWorkerPool_->findFreeModelWorker();
|
||||
if (freeWorker == nullptr) {
|
||||
if (!m_graph_compiler->memory_connection.empty()) {
|
||||
|
||||
Reference in New Issue
Block a user