[CPU] Added support for 'Batched' memory type (#10909)

Egor Shulman, 2022-04-05 09:20:19 +03:00, committed by GitHub
parent 4ad20fb53f
commit ed190374fd
4 changed files with 24 additions and 2 deletions


@@ -4,6 +4,7 @@
 #include "cpp_interfaces/interface/ie_iinfer_request_internal.hpp"
+#include <ie_parallel.hpp>
 #include <map>
 #include <memory>
 #include <openvino/core/partial_shape.hpp>
@@ -324,7 +325,7 @@ void IInferRequestInternal::convertBatchedInputBlob(const std::string& name, con
     auto ptr = mem_blob->wmap();
     // Perform memory copy
-    for (size_t i = 0; i < batched_blob->size(); i++) {
+    InferenceEngine::parallel_for(batched_blob->size(), [&](size_t i) {
         const auto& blob = as<MemoryBlob>(batched_blob->getBlob(i));
         OPENVINO_ASSERT(blob, "Internal error - can't cast blob ", i, " to MemoryBlob");
         const auto& blob_desc = blob->getTensorDesc().getBlockingDesc();
@@ -346,7 +347,7 @@ void IInferRequestInternal::convertBatchedInputBlob(const std::string& name, con
                     blob->rmap().as<uint8_t*>() +
                         blob->getTensorDesc().getBlockingDesc().getOffsetPadding() * blob->element_size(),
                     blob->byteSize());
-    }
+    });
     SetBlob(name, mem_blob);
 }
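
The change above replaces the sequential per-batch copy with InferenceEngine::parallel_for (hence the new ie_parallel.hpp include): each batch element's buffer is copied by its own task, and since every iteration writes a disjoint slice of the destination blob, no synchronization is needed. A minimal self-contained sketch of the same pattern, assuming an OpenVINO developer-package build that ships ie_parallel.hpp (the helper name and buffer layout are illustrative):

#include <ie_parallel.hpp>

#include <cstdint>
#include <cstring>
#include <vector>

// Copy N equally-sized per-batch buffers into one contiguous destination,
// one batch element per parallel_for iteration.
void copy_batched(const std::vector<std::vector<uint8_t>>& src, std::vector<uint8_t>& dst) {
    const size_t item_size = src.empty() ? 0 : src[0].size();
    dst.resize(src.size() * item_size);
    // Iteration i owns the disjoint slice [i * item_size, (i + 1) * item_size),
    // so the copies can run concurrently without locks.
    InferenceEngine::parallel_for(src.size(), [&](size_t i) {
        std::memcpy(dst.data() + i * item_size, src[i].data(), item_size);
    });
}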


@@ -156,6 +156,7 @@ void InferRequestBase::InferImpl() {
     graph = &(graphLock._graph);
     ThrowIfCanceled();
+    convertBatchedInputBlobs();
     if (graph->hasDynamicInput()) {
         redefineMemoryForInputNodes();
@@ -724,6 +725,7 @@ void InferRequest::SetBlob(const std::string& name, const InferenceEngine::Blob:
             externalPtr.erase(name);
         }
         _inputs[name] = data;
+        _batched_inputs.erase(name);
     } else {
         if (compoundBlobPassed) {
             IE_THROW(NotImplemented) << "Can't set compound blob: supported only for input pre-processing";
@@ -758,6 +760,10 @@ void InferRequest::SetBlob(const std::string& name, const InferenceEngine::Blob:
     }
 }
+
+void InferRequest::SetBlobsImpl(const std::string& name, const InferenceEngine::BatchedBlob::Ptr& batched_blob) {
+    _batched_inputs[name] = batched_blob;
+}
 
 InferenceEngine::Blob::Ptr InferRequest::GetBlob(const std::string& name) {
     OV_ITT_SCOPED_TASK(itt::domains::intel_cpu, "GetBlob");
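
Taken together, the three hunks above wire the feature into the CPU plugin: SetBlobsImpl caches the incoming BatchedBlob in _batched_inputs, a plain SetBlob on the same input invalidates that cache, and the convertBatchedInputBlobs() call at the top of InferImpl drains it right before inference. For orientation, a hedged sketch of what that base-class helper does; the real implementation lives in IInferRequestInternal, and this simplification is not the verbatim source:

// Merge every cached BatchedBlob into a single contiguous input blob.
void IInferRequestInternal::convertBatchedInputBlobs() {
    // Iterate over a copy: convertBatchedInputBlob ends in SetBlob, which may
    // mutate _batched_inputs while we walk it.
    const auto batched_inputs = _batched_inputs;
    for (const auto& item : batched_inputs) {
        convertBatchedInputBlob(item.first, item.second);
    }
}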


@@ -92,6 +92,7 @@ public:
                  std::shared_ptr<ExecNetwork> execNetwork);
     void SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr &data) override;
+    void SetBlobsImpl(const std::string& name, const InferenceEngine::BatchedBlob::Ptr& batched_blob) override;
     InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override;
 private:
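
The declaration added here overrides the virtual hook from IInferRequestInternal. As an assumption worth stating: plugins that do not override the hook are expected to reject batched inputs, roughly like the sketch below (the exact wording of the default is illustrative, not quoted from the source):

// Hypothetical base-class default: without a plugin override,
// batched inputs are rejected with NotImplemented.
void IInferRequestInternal::SetBlobsImpl(const std::string& name, const BatchedBlob::Ptr& batched_blob) {
    IE_THROW(NotImplemented) << "set_blobs is not supported for input '" << name << "'";
}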


@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_infer_request/batched_tensors.hpp"
+
+using namespace ov::test::behavior;
+
+namespace {
+
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestBatchedTests,
+                         ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                         OVInferRequestBatchedTests::getTestCaseName);
+}  // namespace
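
The new test file just instantiates the shared OVInferRequestBatchedTests suite for the CPU device. From the user's side, the behavior it exercises looks roughly like this OpenVINO 2.0 API sketch (the model path, input name, and shapes are placeholders):

#include <openvino/openvino.hpp>

#include <cstddef>
#include <vector>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // assumed to take an input with batch dimension 4
    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();

    // One ov::Tensor per batch element; each tensor is a single batch slice.
    std::vector<ov::Tensor> batch;
    for (size_t i = 0; i < 4; ++i)
        batch.emplace_back(ov::element::f32, ov::Shape{1, 3, 224, 224});

    request.set_tensors("input", batch);  // cached by SetBlobsImpl, merged before inference
    request.infer();
    return 0;
}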