adding I64/U64 support to the auto-batching (#10234)

* adding I64/U64/etc support

* input precision test instantiations for GPU and BATCH:GPU
Maxim Shevtsov authored 2022-02-09 18:28:13 +03:00, committed by GitHub
parent f56c640550
commit c0a375f844
2 changed files with 103 additions and 10 deletions

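For context: the helper instantiated by every case below is defined earlier in the same file and is not part of these hunks. A minimal sketch of how such a helper can work, with the signature and body inferred from the call sites below rather than taken from the commit itself. The key point it illustrates is that the Precision template parameter is resolved to a concrete C++ type at compile time, which is why the switch needs one explicit case per supported precision:

#include <ie_blob.h>
#include <ie_precision.hpp>

// Sketch only: builds a per-request view blob over one slice of the batched blob.
template <InferenceEngine::Precision::ePrecision precision>
InferenceEngine::Blob::Ptr create_shared_blob_on_top_of_batched_blob(
    InferenceEngine::Blob::Ptr batched_blob,
    size_t batch_id,
    size_t batch_num) {
    // Map the enum value to the corresponding C++ type, e.g. I64 -> int64_t.
    using TYPE = typename InferenceEngine::PrecisionTrait<precision>::value_type;
    auto ptr = batched_blob->buffer().as<TYPE*>();
    const auto size_per_batch = batched_blob->size() / batch_num;
    auto dims = batched_blob->getTensorDesc().getDims();
    dims[0] = 1;  // the view covers a single element of the batch
    // Non-owning blob that aliases this request's slice of the batched buffer.
    return InferenceEngine::make_shared_blob<TYPE>(
        {precision, dims, batched_blob->getTensorDesc().getLayout()},
        ptr + size_per_batch * batch_id);
}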

@@ -103,27 +103,59 @@ void AutoBatchInferRequest::ShareBlobsWithBatchRequest() {
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::I16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::I16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::FP64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::FP64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::FP16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::FP16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::BF16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::BF16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::I64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::I64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U8:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U8>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::BOOL:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::BOOL>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
default:
IE_THROW() << "Unsupported input precision " << it.second->getTensorDesc().getPrecision();
}
@@ -152,27 +184,60 @@ void AutoBatchInferRequest::ShareBlobsWithBatchRequest() {
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::I16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::I16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::FP64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::FP64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::FP16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::FP16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::BF16:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::BF16>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::I64:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::I64>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::U8:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::U8>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
case InferenceEngine::Precision::BOOL:
res = create_shared_blob_on_top_of_batched_blob<InferenceEngine::Precision::BOOL>(
_myBatchedRequestWrapper._inferRequestBatched->GetBlob(it.first),
_batchId,
_batchSize);
break;
default:
IE_THROW(NotImplemented) << "Unsupported input precision " << it.second->getTensorDesc().getPrecision();
}
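To exercise the new precision paths end to end, a hedged usage sketch in the classic Inference Engine API; the model path and batch size are placeholders, not part of this commit:

#include <inference_engine.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder model
    // Request I64 input data, one of the precisions this commit adds.
    network.getInputsInfo().begin()->second->setPrecision(InferenceEngine::Precision::I64);
    // "BATCH:GPU(4)" runs the GPU plugin behind the auto-batching plugin
    // with a batch of 4, matching the BATCH:GPU test instantiations above.
    auto exec_net = core.LoadNetwork(network, "BATCH:GPU(4)");
    auto request = exec_net.CreateInferRequest();
    request.Infer();  // per-request blobs are shared slices of one batched blob
    return 0;
}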