Fix tensor shape issue

River,Li 2023-06-26 00:40:00 +08:00
parent e56f5a2bfe
commit 90a862d2b7
3 changed files with 17 additions and 25 deletions

View File

@@ -1070,11 +1070,8 @@ void Graph::PullOutputData(std::unordered_map<std::string, ov::Tensor>& out,
         }
         auto& aux_tensor = it->second;
         // Dynamic case
-        if (outDims != aux_tensor.get_shape()) {
-            aux_tensor.set_shape(outDims);
-        }
-        void* ext_blob_ptr = it->second.data();
+        aux_tensor.set_shape(out[name].get_shape());
+        void* ext_blob_ptr = aux_tensor.data();
         if ((intr_blob_ptr == nullptr) || (ext_blob_ptr == nullptr)) {
             OPENVINO_THROW("Get tensor has no allocated memory");
         }
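
The ordering above is the heart of the fix: ov::Tensor::set_shape() may reallocate the tensor's backing memory, so a data() pointer taken before the resize can be left dangling. The patched code resizes first and only then reads the pointer. A minimal sketch of the hazard, as a standalone program against the public ov::Tensor API (the shape values are arbitrary):

    #include <openvino/runtime/tensor.hpp>
    #include <iostream>

    int main() {
        // As if a previous inference had produced a small output shape.
        ov::Tensor t(ov::element::f32, ov::Shape{1, 8});
        void* before = t.data();

        // Growing the tensor may reallocate the backing buffer...
        t.set_shape(ov::Shape{1, 1024});
        void* after = t.data();

        // ...so `before` may now dangle. Reading data() only after
        // set_shape(), as the patched PullOutputData does, avoids this.
        std::cout << "reallocated: " << std::boolalpha << (before != after) << "\n";
        return 0;
    }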
@@ -1082,8 +1079,8 @@ void Graph::PullOutputData(std::unordered_map<std::string, ov::Tensor>& out,
         cpu_convert(intr_blob_ptr,
                     ext_blob_ptr,
                     srcPrec,
-                    InferenceEngine::details::convertPrecision(it->second.get_element_type()),
-                    it->second.get_size());
+                    InferenceEngine::details::convertPrecision(aux_tensor.get_element_type()),
+                    intr_blob.GetDescWithType<BlockedMemoryDesc>()->getPaddedElementsCount());
         continue;
     }
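
The second change swaps the element count passed to cpu_convert from the aux tensor's logical size to the internal blob's padded element count. For blocked CPU layouts the internal memory can hold padding beyond the logical shape, so the count presumably has to match the buffer the conversion actually walks. A toy calculation of the difference, assuming a hypothetical 16-channel blocking; all names and numbers here are illustrative, not taken from the patch:

    #include <cstdio>

    int main() {
        // Hypothetical nChw16c-style blocking: channels padded up to a multiple of 16.
        const long N = 1, C = 3, H = 224, W = 224, BLOCK = 16;
        const long padded_C = ((C + BLOCK - 1) / BLOCK) * BLOCK;  // 3 -> 16

        const long logical = N * C * H * W;         // what tensor.get_size() would report
        const long padded  = N * padded_C * H * W;  // elements the blocked buffer really holds

        std::printf("logical=%ld padded=%ld\n", logical, padded);  // 150528 vs 802816
        return 0;
    }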

View File

@@ -471,20 +471,19 @@ ov::Tensor SyncInferRequest::get_tensor(const ov::Output<const ov::Node>& _port)
     // input tensor: will be copied to the compiled tensor before graph inference runs
     // output tensor: has been copied from the graph's memory to the aux tensor
+    if (_orig_ports_map.find(port_name) == _orig_ports_map.end()) {
+        OPENVINO_THROW("Cannot find original port, name: ", port_name);
+    }
     // Find the aux tensor; create one if it cannot be found
-    if (_aux_tensors.find(port_name) == _aux_tensors.end()) {
-        auto it = _orig_ports_map.find(port_name);
-        if (it == _orig_ports_map.end()) {
-            OPENVINO_THROW("Cannot find original port, name: ", port_name);
-        }
-        auto port_shape = _orig_ports_map[port_name].get_partial_shape();
-        ov::Shape aux_shape;
-        if (port_shape.is_dynamic()) {
-            aux_shape = compiled_tensor.get_shape();
-        } else {
-            aux_shape = _orig_ports_map[port_name].get_shape();
-        }
+    auto port_shape = port.get_partial_shape();
+    auto it = _aux_tensors.find(port_name);
+    ov::Shape aux_shape = compiled_tensor.get_shape();
+    if (it == _aux_tensors.end()) {
         _aux_tensors[port_name] = ov::Tensor(_orig_ports_map[port_name].get_element_type(), aux_shape);
-    }
+    } else if (port_shape.is_dynamic()) {
+        if (_aux_tensors[port_name].get_shape() != aux_shape)
+            _aux_tensors[port_name].set_shape(aux_shape);
+    }
     return _aux_tensors[port_name];
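
The net effect: the original-port lookup now always runs, and an already-cached aux tensor is resized whenever a dynamic port's compiled shape has changed since the last inference, instead of keeping its stale creation-time shape. A standalone sketch of that create-or-resize cache pattern, using plain STL plus ov::Tensor and hypothetical names:

    #include <openvino/runtime/tensor.hpp>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-in for the request's _aux_tensors cache.
    static std::unordered_map<std::string, ov::Tensor> aux_tensors;

    // Return the cached tensor for `name`, creating it on first use and
    // resizing it when the port is dynamic and the wanted shape has changed.
    ov::Tensor get_aux(const std::string& name,
                       const ov::element::Type& type,
                       const ov::Shape& wanted,
                       bool is_dynamic) {
        auto it = aux_tensors.find(name);
        if (it == aux_tensors.end()) {
            // First request for this port: allocate at the current shape.
            aux_tensors[name] = ov::Tensor(type, wanted);
        } else if (is_dynamic && it->second.get_shape() != wanted) {
            // Shape changed since the last inference: resize in place
            // instead of handing back a tensor with a stale shape.
            it->second.set_shape(wanted);
        }
        return aux_tensors[name];
    }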

View File

@@ -428,11 +428,7 @@ Engine::compile_model(const std::shared_ptr<const ov::Model>& model, const ov::A
     // verification of supported input
     for (const auto &ii : model->inputs()) {
         auto input_precision = ii.get_element_type();
-        //using hash_t = std::hash<typename std::underlying_type<ov::element::Type_t>::type>;
-        using hash_t = std::hash<ov::element::Type_t>;
-        static const std::unordered_set<ov::element::Type_t, hash_t> supported_precisions = {
+        static const std::unordered_set<ov::element::Type_t> supported_precisions = {
            ov::element::Type_t::u8, ov::element::Type_t::i8,
            ov::element::Type_t::u16, ov::element::Type_t::i16,
            ov::element::Type_t::u32, ov::element::Type_t::i32,
@@ -442,7 +438,7 @@ Engine::compile_model(const std::shared_ptr<const ov::Model>& model, const ov::A
            ov::element::Type_t::boolean
        };
-        if (!supported_precisions.count(ov::element::Type_t(input_precision))) {
+        if (!supported_precisions.count(input_precision)) {
            IE_CPU_PLUGIN_THROW(NotImplemented)
                << "Input image format " << input_precision << " is not supported yet...";
        }
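
The dropped hash_t alias works because std::hash has been required to support enumeration types since C++14 (LWG 2148), so an enum class like ov::element::Type_t can key an unordered container directly; the explicit Type_t cast can likewise go because ov::element::Type is implicitly convertible to its Type_t value. A minimal illustration with a stand-in enum (hypothetical names, compiles with -std=c++14):

    #include <cstdio>
    #include <unordered_set>

    // Stand-in for ov::element::Type_t; any scoped enum behaves the same way.
    enum class Precision { u8, i8, f16, f32 };

    int main() {
        // Since C++14 std::hash is defined for enumeration types,
        // so no custom hasher is needed for the key.
        static const std::unordered_set<Precision> supported = {
            Precision::u8, Precision::i8, Precision::f32};

        // count() takes the enum value directly, mirroring the simplified check.
        std::printf("f16 supported: %d\n", supported.count(Precision::f16) ? 1 : 0);
        return 0;
    }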