Fix cases when const blob precision is not FP32/FP16 (#1020)
Co-authored-by: kmagiers <kmagiers@intel.com>
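Summary (from the diff below): the scale-factor calculation for Const layers previously assumed the "custom" blob was either FP32 or FP16 and only special-cased FP16. The change reads the blob precision once, returns early with an identity scale factor (1.0f) for any other precision, and still converts FP16 blobs to FP32 via make_fp32_blob before the min/max scan. The ConcatQuantization test instantiation is also renamed with the smoke_ prefix.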
@@ -199,9 +199,17 @@ class ScaleFactorPerLayer<InferenceEngine::CNNLayer *> {
         if (cnnLayer->type == "Const") {
             auto blob = cnnLayer->blobs["custom"];
-            if (blob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+            auto blob_precision = blob->getTensorDesc().getPrecision();
+
+            if (blob_precision != InferenceEngine::Precision::FP32 && blob_precision != InferenceEngine::Precision::FP16) {
+                quant->_dst_quant.scale = 1.0f;
+                return true;
+            }
+
+            if (blob_precision == InferenceEngine::Precision::FP16) {
                 blob = make_fp32_blob(blob);
             }
+
             auto max_val = std::numeric_limits<float>::min();
             auto min_val = std::numeric_limits<float>::max();
 
 
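A minimal standalone sketch of the control flow this hunk introduces. The names here (Precision, QuantDesc, to_fp32, set_const_scale) are simplified stand-ins for the InferenceEngine/GNA plugin types, not the plugin's actual API:

#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

enum class Precision { FP32, FP16, I32, U8 };

struct QuantDesc {
    float scale = 0.0f;
};

// Stand-in for make_fp32_blob(): here the payload is already float, so the
// conversion is a no-op; in the plugin it widens FP16 storage to FP32.
std::vector<float> to_fp32(std::vector<float> data) { return data; }

bool set_const_scale(Precision blob_precision, std::vector<float> data, QuantDesc& dst_quant) {
    // The new guard: any precision other than FP32/FP16 gets an identity
    // scale factor and an early return, instead of reaching the min/max
    // scan below with an element type it cannot handle.
    if (blob_precision != Precision::FP32 && blob_precision != Precision::FP16) {
        dst_quant.scale = 1.0f;
        return true;
    }

    if (blob_precision == Precision::FP16) {
        data = to_fp32(std::move(data));  // normalize to FP32 before scanning
    }

    // (The hunk initializes max_val with ::min(); ::lowest() is used here so
    // the sketch also handles all-negative data correctly.)
    float max_val = std::numeric_limits<float>::lowest();
    float min_val = std::numeric_limits<float>::max();
    for (float v : data) {
        max_val = std::max(max_val, v);
        min_val = std::min(min_val, v);
    }
    // Illustrative scale choice only; the real computation lives in the plugin.
    dst_quant.scale = (max_val != 0.0f) ? 1.0f / max_val : 1.0f;
    return true;
}

int main() {
    QuantDesc q;
    set_const_scale(Precision::I32, {1.0f, 2.0f}, q);
    std::cout << q.scale << "\n";  // prints 1: the new early-return path
}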
@@ -21,7 +21,7 @@ const std::vector<std::map<std::string, std::string>> configs = {
     }
 };
 
-INSTANTIATE_TEST_CASE_P(ConcatQuantization, ConcatQuantization,
+INSTANTIATE_TEST_CASE_P(smoke_ConcatQuantization, ConcatQuantization,
         ::testing::Combine(
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(CommonTestUtils::DEVICE_GNA),
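A note on the test change: in googletest, the instantiation name becomes a prefix of the generated test names, so renaming ConcatQuantization to smoke_ConcatQuantization lets a CI job select these cases with a name filter such as --gtest_filter=*smoke_*; the parameter set itself is unchanged. (That the smoke_ prefix is used for test scoping is inferred from common OpenVINO test-suite practice, not stated in the diff.)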