fixed fq scaling bug in layerwise tuning (#8346)
This commit is contained in:
parent a693859d8b
commit 7db0973ba7
@@ -93,8 +93,8 @@ class FakeQuantize(torch.nn.Module):
        scale = self.scale.exp()
        s = self.val_h * scale.reciprocal()
        x = x - self.min
        x = x.clamp(max=self.val_h, min=self.val_l)
        x = x * s
        x = x.clamp(max=self.val_h, min=self.val_l)
        x = STERound.apply(x, self.val_l, self.val_h)
        x = x * s.reciprocal() + self.min
        return x
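For context, the hunk above is a learnable fake-quantizer's forward pass: the input is shifted by self.min, mapped onto the integer grid [val_l, val_h] via s = val_h / scale, rounded with a straight-through estimator, and mapped back. The sketch below reconstructs that pass in a self-contained form. It is a reading of the patch, not the repository's code: the added/removed markers of the diff are not reproduced above, so the placement of the clamp after the scaling step, the constructor arguments (num_bits, init_min, init_scale), and the STERound backward rule are all assumptions.

import torch


class STERound(torch.autograd.Function):
    """Round to the nearest integer; pass gradients straight through (assumed backward)."""

    @staticmethod
    def forward(ctx, x, val_l, val_h):
        # Remember which elements lie inside the quantization range.
        ctx.save_for_backward((x >= val_l) & (x <= val_h))
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        (in_range,) = ctx.saved_tensors
        # Straight-through estimator: the gradient passes unchanged inside the range
        # and is zeroed outside it. No gradient for val_l / val_h.
        return grad_output * in_range.to(grad_output.dtype), None, None


class FakeQuantize(torch.nn.Module):
    def __init__(self, num_bits=8, init_min=0.0, init_scale=1.0):
        super().__init__()
        # Learnable log-scale and range offset, matching self.scale.exp() and
        # self.min in the patched forward; the init values here are placeholders.
        self.scale = torch.nn.Parameter(torch.tensor(float(init_scale)).log())
        self.min = torch.nn.Parameter(torch.tensor(float(init_min)))
        self.val_l = 0.0
        self.val_h = float(2 ** num_bits - 1)

    def forward(self, x):
        scale = self.scale.exp()
        s = self.val_h * scale.reciprocal()            # grid units per input unit
        x = x - self.min                               # shift so the range starts at zero
        x = x * s                                      # map onto the integer grid
        x = x.clamp(max=self.val_h, min=self.val_l)    # clip to [val_l, val_h] after scaling
        x = STERound.apply(x, self.val_l, self.val_h)  # quantize, gradients via STE
        x = x * s.reciprocal() + self.min              # dequantize back to input units
        return x


# Usage: values outside [min, min + scale] are clipped; the rest land on 2**num_bits levels.
fq = FakeQuantize(num_bits=8, init_min=-1.0, init_scale=2.0)
y = fq(torch.randn(4, 16))

Keeping the scale as a log-domain parameter (self.scale.exp()) ensures the effective scale stays positive during layerwise tuning without any explicit constraint.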