Fix build issue due to -Wimplicit-const-int-float-conversion and remove unused lambda function (#7470)

* fix build issue due to -Wimplicit-const-int-float-conversion and remove unused lambda function

* just remove it instead of commenting it out

Co-authored-by: FuhengWu@Oracle <fuheng.wu@oracle.com>
Author: Henry Fuheng Wu (committed via GitHub)
Date: 2021-09-14 01:14:44 -07:00
Commit: 7328ee1e35 (parent: 4aad638d15)

3 changed files with 4 additions and 7 deletions
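
For reference, the diagnostic named in the title is a clang warning that fires when an integer constant is implicitly converted to a floating-point type that cannot represent it exactly. A minimal repro, not from the repository, built with clang -Wall -Werror:

    #include <climits>

    // clang reports: implicit conversion from 'int' to 'float' changes
    // value from 2147483647 to 2147483648
    // [-Werror,-Wimplicit-const-int-float-conversion]
    bool exceeds_int_range(float f) {
        return f > INT_MAX;  // INT_MAX is implicitly converted to float
    }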


@@ -232,7 +232,7 @@ inline InferenceEngine::Blob::Ptr fp32_to_precision_blob(InferenceEngine::Blob::
 }
 f32Value = f32Value * scale_factor;
-if (f32Value > std::numeric_limits<T>::max()) {
+if (f32Value > static_cast<float>(std::numeric_limits<T>::max())) {
     precValue = std::numeric_limits<T>::max();
 } else if (f32Value < std::numeric_limits<T>::min()) {
     precValue = std::numeric_limits<T>::min();
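
The cast is the standard remedy: comparing against static_cast<float>(std::numeric_limits<T>::max()) makes the lossy conversion explicit, so the warning no longer fires while the saturation logic is unchanged. A self-contained sketch of the pattern, with illustrative names (saturate_to is not the plugin's function):

    #include <limits>

    // Clamp a float into T's range before narrowing; the explicit casts
    // acknowledge that T's limits may round when represented as float.
    template <typename T>
    T saturate_to(float value) {
        if (value > static_cast<float>(std::numeric_limits<T>::max())) {
            return std::numeric_limits<T>::max();
        }
        if (value < static_cast<float>(std::numeric_limits<T>::min())) {
            return std::numeric_limits<T>::min();
        }
        return static_cast<T>(value);
    }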


@@ -486,9 +486,6 @@ void GNAPlugin::UpdateInputScaleFromNetwork(InferenceEngine::CNNNetwork & network
         << "unsupported, per-channel quantization for input layer : " << input.second->name();
 }
-auto fp32eq = [](float p1, float p2) -> bool {
-    return (std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2)));
-};
 // GNA input is always quantized to int16, so number of levels can't be greater than max uint16
 // todo: should be solved in POT (issue 63330)
 size_t levels = std::min(fqLayer.getLevels(), static_cast<size_t>(std::numeric_limits<uint16_t>::max() + 1));
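
Deleting the fp32eq lambda outright, rather than commenting it out, is what the second bullet of the commit message refers to: under warnings-as-errors, a local that is defined but never used (including one holding a lambda) fails the build, commonly via -Wunused-variable. A minimal repro, not from the repository:

    #include <algorithm>
    #include <cmath>

    int main() {
        // With -Wall -Werror, clang rejects this: unused variable 'fp32eq'.
        // Removing the definition, as the commit does, is the fix.
        auto fp32eq = [](float p1, float p2) -> bool {
            return std::abs(p1 - p2) <= 0.00001f * std::min(std::abs(p1), std::abs(p2));
        };
        return 0;
    }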


@@ -549,21 +549,21 @@ void PwlDesignOpt(const DnnActivation activation_type,
     break;
 case kActLog: {
     double x_min = (1 + ~XBASEMASK) / scale_in;
-    double x_max = ((INT32_MAX / scale_in) < LOG_DOMAIN) ? (INT32_MAX / scale_in) : LOG_DOMAIN;
+    double x_max = ((static_cast<double>(INT32_MAX) / scale_in) < LOG_DOMAIN) ? (static_cast<double>(INT32_MAX) / scale_in) : LOG_DOMAIN;
     pwl = pwl_search(activation_type, x_min, x_max, PWL_DESIGN_THRESHOLD, pwlMaxErrorPercent, PWL_DESIGN_SAMPLES, err_pct);
     make_gna_pwl(activation_type, pwl, x_min, x_max, scale_in, scale_out, low_precision, ptr_segment);
     break;
 }
 case kActNegLog: {
     double x_min = (1 + ~XBASEMASK) / scale_in;
-    double x_max = ((INT32_MAX / scale_in) < LOG_DOMAIN) ? (INT32_MAX / scale_in) : LOG_DOMAIN;
+    double x_max = ((static_cast<double>(INT32_MAX) / scale_in) < LOG_DOMAIN) ? (static_cast<double>(INT32_MAX) / scale_in) : LOG_DOMAIN;
     pwl = pwl_search(activation_type, x_min, x_max, PWL_DESIGN_THRESHOLD, pwlMaxErrorPercent, PWL_DESIGN_SAMPLES, err_pct);
     make_gna_pwl(activation_type, pwl, x_min, x_max, scale_in, scale_out, low_precision, ptr_segment);
     break;
 }
 case kActNegHalfLog: {
     double x_min = (1 + ~XBASEMASK) / scale_in;
-    double x_max = ((INT32_MAX / scale_in) < LOG_DOMAIN) ? (INT32_MAX / scale_in) : LOG_DOMAIN;
+    double x_max = ((static_cast<double>(INT32_MAX) / scale_in) < LOG_DOMAIN) ? (static_cast<double>(INT32_MAX) / scale_in) : LOG_DOMAIN;
     pwl = pwl_search(activation_type, x_min, x_max, PWL_DESIGN_THRESHOLD, pwlMaxErrorPercent, PWL_DESIGN_SAMPLES, err_pct);
     make_gna_pwl(activation_type, pwl, x_min, x_max, scale_in, scale_out, low_precision, ptr_segment);
     break;
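
All three cases compute the same bound. Without the cast, INT32_MAX / scale_in first converts INT32_MAX to float, rounding it to 2147483648.0f, which is exactly what the warning flags; static_cast<double>(INT32_MAX) keeps the constant exact, since double carries 53 mantissa bits. As a sketch under assumed names (LOG_DOMAIN's real value lives in the plugin sources), the repeated ternary is equivalent to std::min:

    #include <algorithm>
    #include <cstdint>

    constexpr double LOG_DOMAIN = 50.0;  // illustrative stand-in only

    double log_domain_upper_bound(float scale_in) {
        // Same clamp as the three ternaries above, written with std::min.
        return std::min(static_cast<double>(INT32_MAX) / scale_in, LOG_DOMAIN);
    }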