From fecc7eac908ee5efe3305a777a6afdf8053a7834 Mon Sep 17 00:00:00 2001
From: Eugene Smirnov
Date: Tue, 29 Sep 2020 19:44:12 +0300
Subject: [PATCH] [GNA] fixed conversion from fp16 to fp32 networks in case of
 const blobs (#2446)

---
 .../src/gna_plugin/frontend/weights_converter.hpp  | 9 ++-------
 .../single_layer_tests/fake_quantize.cpp           | 2 +-
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/inference-engine/src/gna_plugin/frontend/weights_converter.hpp b/inference-engine/src/gna_plugin/frontend/weights_converter.hpp
index 040f7bb11f6..064e84988fe 100644
--- a/inference-engine/src/gna_plugin/frontend/weights_converter.hpp
+++ b/inference-engine/src/gna_plugin/frontend/weights_converter.hpp
@@ -50,15 +50,10 @@ inline bool convertWeights(InferenceEngine::CNNLayer* lp) {
     for (auto& dataItem : lp->outData) {
         dataItem->setPrecision(InferenceEngine::Precision::FP32);
     }
-    InferenceEngine::BlobMap newBlobs;
     for (auto& blob_pair : lp->blobs) {
-        auto blob_name = blob_pair.first;
-        auto blob_ptr = blob_pair.second;
+        auto &blob_ptr = blob_pair.second;
         if (blob_ptr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
-            auto new_blob = make_fp32_blob(blob_ptr);
-            newBlobs[blob_name] = new_blob;
-        } else {
-            newBlobs[blob_name] = blob_ptr;
+            blob_ptr = make_fp32_blob(blob_ptr);
         }
     }
 
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp
index bad19b03e0e..9e5896e8abb 100644
--- a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp
@@ -13,7 +13,7 @@ using namespace LayerTestsDefinitions;
 
 namespace {
 const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32, 
+    InferenceEngine::Precision::FP32,
     InferenceEngine::Precision::FP16
 };
 using ConfigType = std::map<std::string, std::string>;
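
Note on the fix above: the removed code collected converted blobs into a local
newBlobs map, but nothing in the removed lines ever assigned that map back to
lp->blobs, so const FP16 blobs apparently stayed FP16. The patch instead binds
the mapped value by reference and replaces it in place. Below is a minimal
standalone sketch of that in-place pattern; Blob, Precision, and
make_fp32_blob here are simplified stand-ins, not the actual InferenceEngine
types.

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    enum class Precision { FP16, FP32 };

    struct Blob { Precision precision; };
    using BlobPtr = std::shared_ptr<Blob>;

    // Stand-in for make_fp32_blob(): pretend to re-pack the data as FP32.
    BlobPtr make_fp32_blob(const BlobPtr& /*src*/) {
        return std::make_shared<Blob>(Blob{Precision::FP32});
    }

    int main() {
        std::map<std::string, BlobPtr> blobs = {
            {"custom",  std::make_shared<Blob>(Blob{Precision::FP16})},
            {"weights", std::make_shared<Blob>(Blob{Precision::FP32})},
        };

        // Bind the mapped value by reference so the assignment updates the
        // map entry itself; copying it into a local (as the old code did)
        // leaves the stored blob untouched.
        for (auto& blob_pair : blobs) {
            auto& blob_ptr = blob_pair.second;
            if (blob_ptr->precision == Precision::FP16) {
                blob_ptr = make_fp32_blob(blob_ptr);
            }
        }

        for (const auto& p : blobs) {
            std::cout << p.first << ": "
                      << (p.second->precision == Precision::FP32 ? "FP32" : "FP16")
                      << "\n";
        }
    }

Because entries that need no conversion are simply left in place, the old
else branch that re-inserted unchanged blobs also becomes unnecessary, which
is why the patch drops it.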