[GNA] Fixed conversion of FP16 networks to FP32 in the case of const blobs (#2446)

Eugene Smirnov 2020-09-29 19:44:12 +03:00 committed by GitHub
parent 266d275293
commit fecc7eac90
2 changed files with 3 additions and 8 deletions


@@ -50,15 +50,10 @@ inline bool convertWeights(InferenceEngine::CNNLayer* lp) {
     for (auto& dataItem : lp->outData) {
         dataItem->setPrecision(InferenceEngine::Precision::FP32);
     }
-    InferenceEngine::BlobMap newBlobs;
     for (auto& blob_pair : lp->blobs) {
-        auto blob_name = blob_pair.first;
-        auto blob_ptr = blob_pair.second;
+        auto &blob_ptr = blob_pair.second;
         if (blob_ptr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
-            auto new_blob = make_fp32_blob(blob_ptr);
-            newBlobs[blob_name] = new_blob;
-        } else {
-            newBlobs[blob_name] = blob_ptr;
+            blob_ptr = make_fp32_blob(blob_ptr);
         }
     }
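
The rewritten loop binds the mapped value by reference (auto &blob_ptr = blob_pair.second;) and overwrites FP16 blobs in place, so the intermediate newBlobs map and the else branch are no longer needed. Below is a minimal, self-contained C++ sketch of that pattern; the Blob struct and the widen() helper are hypothetical stand-ins for the real InferenceEngine blob type and make_fp32_blob(), not OpenVINO APIs.

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Blob {
    bool fp16;                 // stand-in for getTensorDesc().getPrecision()
    std::vector<float> data;
};

// Hypothetical helper: produce an FP32 copy of an FP16 blob.
std::shared_ptr<Blob> widen(const std::shared_ptr<Blob>& src) {
    return std::make_shared<Blob>(Blob{false, src->data});
}

int main() {
    std::map<std::string, std::shared_ptr<Blob>> blobs = {
        {"weights", std::make_shared<Blob>(Blob{true,  {1.0f, 2.0f}})},
        {"biases",  std::make_shared<Blob>(Blob{false, {0.5f}})},
    };

    // Binding the mapped value by reference lets the loop replace it in
    // place; FP32 blobs are left untouched, so no second map and no else
    // branch are required.
    for (auto& blob_pair : blobs) {
        auto& blob_ptr = blob_pair.second;
        if (blob_ptr->fp16) {
            blob_ptr = widen(blob_ptr);
        }
    }

    for (const auto& p : blobs) {
        std::cout << p.first << ": fp16=" << std::boolalpha
                  << p.second->fp16 << '\n';
    }
    return 0;
}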


@@ -13,7 +13,7 @@ using namespace LayerTestsDefinitions;
 namespace {
 const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16
 };
 using ConfigType = std::map<std::string, std::string>;
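
This second hunk widens the test coverage: adding FP16 to netPrecisions makes every value-parameterized test driven by that vector run once per precision. Below is a minimal GoogleTest sketch of that mechanism; the Precision enum, the PrecisionSmokeTest fixture, and the suite name are hypothetical illustrations, not the actual LayerTestsDefinitions harness.

// Build by linking against gtest_main, which supplies main().
#include <gtest/gtest.h>
#include <vector>

enum class Precision { FP32, FP16 };

// Mirrors the diff: each entry becomes one instantiation of every TEST_P.
static const std::vector<Precision> netPrecisions = {
    Precision::FP32, Precision::FP16
};

class PrecisionSmokeTest : public ::testing::TestWithParam<Precision> {};

TEST_P(PrecisionSmokeTest, LoadsNetwork) {
    // A real test would build and infer a network at GetParam(); here we
    // only check the parameter is one of the supported precisions.
    const Precision p = GetParam();
    EXPECT_TRUE(p == Precision::FP32 || p == Precision::FP16);
}

// ValuesIn expands the vector, so appending FP16 above automatically adds
// an FP16 run without touching the test body.
INSTANTIATE_TEST_SUITE_P(smoke_Precisions, PrecisionSmokeTest,
                         ::testing::ValuesIn(netPrecisions));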