Fix AUTO plugin mismatch issue for get_tensor function

River,Li 2023-06-25 18:18:22 +08:00
parent 018331dba1
commit 48ff306b45
5 changed files with 30 additions and 25 deletions

View File

@@ -996,7 +996,9 @@ void Graph::PushInputData(const std::string& name, const ov::Tensor &in) {
}
}
-void Graph::PullOutputData(std::unordered_map<std::string, ov::Tensor> &out) {
+void Graph::PullOutputData(std::unordered_map<std::string, ov::Tensor>& out,
+                           std::unordered_map<std::string, bool>& out_precision_changed,
+                           std::unordered_map<std::string, ov::Tensor>& aux_tensors) {
if (!IsReady())
IE_THROW() << "Wrong state. Topology not ready.";
@@ -1060,6 +1062,25 @@ void Graph::PullOutputData(std::unordered_map<std::string, ov::Tensor> &out) {
void *ext_blob_ptr = ext_blob.data();
void *intr_blob_ptr = intr_blob.GetData();
+// If the output precision has been changed compared to the original model's output, it must be copied to the aux tensor
+if (out_precision_changed[name]) {
+    auto it = aux_tensors.find(name);
+    if (it == aux_tensors.end()) {
+        OPENVINO_THROW("Output precision has been changed, but cannot find its aux tensor.");
+    }
+    void* ext_blob_ptr = it->second.data();
+    if ((intr_blob_ptr == nullptr) || (ext_blob_ptr == nullptr)) {
+        OPENVINO_THROW("Get tensor has no allocated memory");
+    }
+    cpu_convert(intr_blob_ptr,
+                ext_blob_ptr,
+                srcPrec,
+                InferenceEngine::details::convertPrecision(it->second.get_element_type()),
+                it->second.get_size());
+    continue;
+}
// That is the same memory. No need to copy
if (ext_blob_ptr == intr_blob_ptr) continue;
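For readers unfamiliar with cpu_convert, the branch added above boils down to an element-wise conversion copy from the plugin's internal buffer into the pre-allocated aux tensor that holds the precision the original model declares. Below is a minimal standalone sketch of that idea, not part of the diff: plain C++, no OpenVINO dependency, with hypothetical names (convert_copy) and f32→f64 standing in for an arbitrary precision change.

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Hypothetical helper: copy `count` elements from `src` to `dst`, converting the
// element type on the fly (the role cpu_convert plays for precision-changed
// outputs above, reduced here to a static_cast).
template <typename SrcT, typename DstT>
void convert_copy(const SrcT* src, DstT* dst, std::size_t count) {
    if (src == nullptr || dst == nullptr)
        throw std::runtime_error("convert_copy: buffer has no allocated memory");
    for (std::size_t i = 0; i < count; ++i)
        dst[i] = static_cast<DstT>(src[i]);
}

int main() {
    std::vector<float> internal = {0.5f, 1.25f, -3.75f};  // plugin-side data (e.g. f32)
    std::vector<double> aux(internal.size());             // aux-tensor precision (e.g. f64)
    convert_copy(internal.data(), aux.data(), internal.size());
    for (double v : aux)
        std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}
```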

View File

@@ -59,7 +59,9 @@ public:
}
void PushInputData(const std::string& name, const ov::Tensor& in);
-void PullOutputData(std::unordered_map<std::string, ov::Tensor>& out);
+void PullOutputData(std::unordered_map<std::string, ov::Tensor>& out,
+                    std::unordered_map<std::string, bool>& out_precision_changed,
+                    std::unordered_map<std::string, ov::Tensor>& aux_tensors);
void Infer(SyncInferRequest* request = nullptr);

View File

@@ -80,6 +80,8 @@ void SyncInferRequest::create_infer_request() {
}
for (const auto& it : _output_ports_map) {
init_tensor(it.first);
+// allocate aux tensor for output if output precision has been changed
+get_tensor(it.second);
}
// Save all MemoryLayer data tensors. Will use insight about mechanics
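The extra get_tensor(it.second) call added above is what triggers eager allocation: every output whose element type the plugin changed gets its user-facing aux tensor created before the first inference. A rough sketch of that allocation step follows, with hypothetical names (PortInfo, allocate_aux_buffers) and plain STL buffers standing in for ov::Tensor.

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical description of one model output.
struct PortInfo {
    std::string name;
    bool precision_changed;   // plugin executes in a different element type than the model declares
    std::size_t byte_size;    // size of the output in the original (user-visible) precision
};

// Eagerly create one raw buffer per precision-changed output, keyed by port
// name, so later reads never have to allocate lazily.
std::unordered_map<std::string, std::vector<unsigned char>>
allocate_aux_buffers(const std::vector<PortInfo>& outputs) {
    std::unordered_map<std::string, std::vector<unsigned char>> aux;
    for (const auto& port : outputs) {
        if (port.precision_changed)
            aux[port.name].resize(port.byte_size);
    }
    return aux;
}

int main() {
    auto aux = allocate_aux_buffers({{"prob", true, 1000 * sizeof(float)}, {"ids", false, 0}});
    return aux.count("prob") == 1 ? 0 : 1;  // only the precision-changed output gets an aux buffer
}
```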
@@ -233,7 +235,7 @@ void SyncInferRequest::infer() {
throw_if_canceled();
-graph->PullOutputData(_outputs);
+graph->PullOutputData(_outputs, _port_precision_changed, _aux_tensors);
}
std::vector<ov::ProfilingInfo> SyncInferRequest::get_profiling_info() const {
@@ -486,25 +488,7 @@ ov::Tensor SyncInferRequest::get_tensor(const ov::Output<const ov::Node>& _port)
_aux_tensors[port_name] = ov::Tensor(_orig_ports_map[port_name].get_element_type(), external_shape);
}
-// input tensor is in aux tensors, don't need copy anything
-auto& aux_tensor = _aux_tensors[port_name];
-if (is_input) {
-    return aux_tensor;
-}
-// output tensor need copy data from compiled tensor
-const void* srcData = compiled_tensor.data();
-void* dstData = aux_tensor.data();
-if ((dstData == nullptr) || (srcData == nullptr)) {
-    OPENVINO_THROW("Get tensor has no allocated memory");
-}
-cpu_convert(srcData,
-            dstData,
-            InferenceEngine::details::convertPrecision(compiled_tensor.get_element_type()),
-            InferenceEngine::details::convertPrecision(aux_tensor.get_element_type()),
-            compiled_tensor.get_size());
-return aux_tensor;
+return _aux_tensors[port_name];
}
std::vector<ov::Tensor> SyncInferRequest::get_tensors(const ov::Output<const ov::Node>& _port) const {

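Taken together with the PullOutputData changes, the control flow for a precision-changed output now appears to be: create_infer_request() allocates the aux tensor up front, infer() converts the freshly computed data into it, and get_tensor() simply returns it. Below is a compact sketch of that ownership model, with a hypothetical ToyRequest class, plain STL buffers instead of ov::Tensor, and f32→f64 standing in for an arbitrary precision change.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

class ToyRequest {
public:
    // Mirrors create_infer_request(): aux storage is allocated before the first inference.
    void register_output(const std::string& name, std::size_t element_count) {
        aux_buffers_[name].resize(element_count);
    }

    // Mirrors infer() + Graph::PullOutputData(): the conversion copy happens here,
    // so the aux buffer always holds the latest results in the user-visible type.
    void infer(const std::unordered_map<std::string, std::vector<float>>& internal_results) {
        for (auto& [name, dst] : aux_buffers_) {
            const auto& src = internal_results.at(name);
            for (std::size_t i = 0; i < dst.size(); ++i)
                dst[i] = static_cast<double>(src[i]);
        }
    }

    // Mirrors the new get_tensor(): no conversion on read, just hand the buffer back.
    const std::vector<double>& get_output(const std::string& name) const {
        return aux_buffers_.at(name);
    }

private:
    std::unordered_map<std::string, std::vector<double>> aux_buffers_;
};

int main() {
    ToyRequest req;
    req.register_output("prob", 3);
    req.infer({{"prob", {0.25f, 0.5f, 0.75f}}});
    for (double v : req.get_output("prob"))
        std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}
```

Moving the copy out of get_tensor() seems to be the crux of the fix: a caller such as the AUTO plugin can read the tensor at any point after infer() and still observe data that was synchronized during inference rather than converted at an arbitrary later read.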
View File

@@ -32,6 +32,7 @@
#include <cpu/x64/cpu_isa_traits.hpp>
#include <itt.h>
#include <unordered_set>
using namespace InferenceEngine;

View File

@@ -209,9 +209,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*smoke_Transpose(2|4|5|6)D/TransposeLayerTest.CompareWithRefs.*)",
// Issue: 113704 - Layout information may be incorrect when converting a tensor to a blob
R"(.*smoke_.*BehaviorTests/InferRequestPreprocessConversionTest.*)",
-// Issue: There are some problem when work with AUTO plugin API 2.0
-R"(.*smoke_Auto_CachingSupportCase_CPU/CompileModelCacheTestBase.CompareWithRefImpl.*)",
-R"(.*smoke_Auto_CachingSupportCase_CPU_Float/CompileModelCacheTestBase.CompareWithRefImpl.*)",
// Issue: JIT kernel selection issue, it is unclear why jit_avx512_BF16 is not chosen
R"(.*smoke_PSROIPoolingAverageLayoutTest/PSROIPoolingLayerCPUTest.*)",
R"(.*smoke_PSROIPoolingBilinearLayoutTest/PSROIPoolingLayerCPUTest.*)",