fixed fq scaling bug in layerwise tuning (#8346)

Tagir Rakipov 2021-11-08 12:57:13 +03:00 committed by GitHub
parent a693859d8b
commit 7db0973ba7

@@ -93,8 +93,8 @@ class FakeQuantize(torch.nn.Module):
         scale = self.scale.exp()
         s = self.val_h * scale.reciprocal()
         x = x - self.min
-        x = x.clamp(max=self.val_h, min=self.val_l)
         x = x * s
+        x = x.clamp(max=self.val_h, min=self.val_l)
         x = STERound.apply(x, self.val_l, self.val_h)
         x = x * s.reciprocal() + self.min
         return x
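
Why the fix works: in the old order, the clamp to [self.val_l, self.val_h] ran before the multiplication by s, so still-unscaled real-valued activations were compared against bounds defined in quantized units. Moving the clamp after `x * s` applies the bounds in the units they belong to, and the subsequent straight-through round then sees properly bounded values. Below is a minimal, runnable sketch of the corrected forward pass; the diff does not show how STERound, self.scale, self.min, val_l, or val_h are defined, so every definition here is an assumption made for illustration, not the project's actual implementation.

```python
import torch


class STERound(torch.autograd.Function):
    """Round with a straight-through estimator (assumed behavior):
    identity gradient inside [val_l, val_h], zero gradient outside."""

    @staticmethod
    def forward(ctx, x, val_l, val_h):
        ctx.save_for_backward(x)
        ctx.val_l, ctx.val_h = val_l, val_h
        return x.round()

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        inside = (x >= ctx.val_l) & (x <= ctx.val_h)
        return grad_out * inside, None, None


class FakeQuantize(torch.nn.Module):
    """Assumed parameterization: self.scale stores log(scale), hence
    .exp() in forward; self.min is a learned offset; [val_l, val_h]
    is the integer range of the quantized grid."""

    def __init__(self, num_bits=8):
        super().__init__()
        self.scale = torch.nn.Parameter(torch.zeros(1))
        self.min = torch.nn.Parameter(torch.zeros(1))
        self.val_l = 0.0
        self.val_h = float(2 ** num_bits - 1)

    def forward(self, x):
        scale = self.scale.exp()
        s = self.val_h * scale.reciprocal()
        x = x - self.min
        x = x * s                                    # map into quantized units first
        x = x.clamp(max=self.val_h, min=self.val_l)  # then clamp in those units (the fix)
        x = STERound.apply(x, self.val_l, self.val_h)
        x = x * s.reciprocal() + self.min            # map back to real units
        return x


if __name__ == "__main__":
    fq = FakeQuantize(num_bits=8)
    x = torch.randn(16, requires_grad=True)
    fq(x).sum().backward()  # gradients flow through the STE round
```

With the pre-fix order, the clamp compared real-valued inputs against the quantized-grid bounds, so most activations were truncated before scaling ever happened; the corrected order keeps the clamp a no-op except where the quantized value would actually overflow the grid.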