Fix cases when const blob precision is not FP32/FP16 (#1020)

Author: Kamil Magierski
Date: 2020-06-22 14:46:01 +02:00
Committed by: GitHub
Co-authored-by: kmagiers <kmagiers@intel.com>
parent 491e5e9fbb
commit f675848680
2 changed files with 10 additions and 2 deletions


@@ -199,9 +199,17 @@ class ScaleFactorPerLayer<InferenceEngine::CNNLayer *> {
         if (cnnLayer->type == "Const") {
             auto blob = cnnLayer->blobs["custom"];
-            if (blob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+            auto blob_precision = blob->getTensorDesc().getPrecision();
+            if (blob_precision != InferenceEngine::Precision::FP32 && blob_precision != InferenceEngine::Precision::FP16) {
+                quant->_dst_quant.scale = 1.0f;
+                return true;
+            }
+            if (blob_precision == InferenceEngine::Precision::FP16) {
                 blob = make_fp32_blob(blob);
             }
             auto max_val = std::numeric_limits<float>::min();
             auto min_val = std::numeric_limits<float>::max();
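For orientation, here is a minimal, self-contained C++ sketch of the behaviour this hunk introduces. BlobPrecision, ConstBlob, constScaleFactor and the target_level value are made-up stand-ins for the plugin's real types and constants, not the plugin API itself: const blobs with an unsupported precision now get a neutral scale factor of 1.0f, while FP32/FP16 blobs are scanned for their min/max values as before.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

// Hypothetical stand-ins; the real code works on InferenceEngine::Blob and
// InferenceEngine::Precision inside the GNA plugin's scale-factor calculation.
enum class BlobPrecision { FP32, FP16, I32, U8 };

struct ConstBlob {
    BlobPrecision precision;
    std::vector<float> data;  // values, already widened to float for simplicity
};

float constScaleFactor(const ConstBlob& blob, float target_level = 16384.0f) {
    // New behaviour from the diff: any precision other than FP32/FP16 gets a
    // neutral scale of 1.0f and the layer is otherwise left alone.
    if (blob.precision != BlobPrecision::FP32 &&
        blob.precision != BlobPrecision::FP16) {
        return 1.0f;
    }
    // (In the plugin an FP16 blob is converted to FP32 here via make_fp32_blob.)
    auto max_val = std::numeric_limits<float>::min();
    auto min_val = std::numeric_limits<float>::max();
    for (float v : blob.data) {
        max_val = std::max(max_val, v);
        min_val = std::min(min_val, v);
    }
    float abs_val = std::max(std::abs(min_val), std::abs(max_val));
    return abs_val != 0.0f ? target_level / abs_val : 1.0f;
}

int main() {
    ConstBlob int_blob{BlobPrecision::I32, {1.0f, 2.0f, 3.0f}};
    ConstBlob fp_blob{BlobPrecision::FP32, {-0.5f, 0.25f, 2.0f}};
    std::cout << constScaleFactor(int_blob) << "\n";  // 1 (precision skipped)
    std::cout << constScaleFactor(fp_blob) << "\n";   // 8192
}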


@@ -21,7 +21,7 @@ const std::vector<std::map<std::string, std::string>> configs = {
     }
 };
-INSTANTIATE_TEST_CASE_P(ConcatQuantization, ConcatQuantization,
+INSTANTIATE_TEST_CASE_P(smoke_ConcatQuantization, ConcatQuantization,
         ::testing::Combine(
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(CommonTestUtils::DEVICE_GNA),
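The only change in this file is the smoke_ prefix on the instantiation name. In googletest the instantiation name becomes part of every generated test's full name, so prefixed tests can be selected with a name filter. A hedged sketch of that mechanism follows; PrefixDemo and smoke_Values are invented names, only the prefix idea comes from the diff.

#include <gtest/gtest.h>

class PrefixDemo : public ::testing::TestWithParam<int> {};

TEST_P(PrefixDemo, IsNonNegative) {
    EXPECT_GE(GetParam(), 0);
}

// The instantiation name is prepended to every generated test, e.g.
// smoke_Values/PrefixDemo.IsNonNegative/0, so running the binary with
// --gtest_filter=smoke_* picks up exactly the instantiations carrying
// the prefix, which is presumably the point of the rename above.
INSTANTIATE_TEST_CASE_P(smoke_Values, PrefixDemo, ::testing::Values(0, 1, 2));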