[PT FE] Enable quantized conv and linear tests (#18723)
commit bc734dfaaa (parent 7e1d8283f2)
@@ -38,7 +38,8 @@ class TestQuantizedConv2D(PytorchLayerTest):
                 self.conv.zero_point = int(zero_point)

             def forward(self, x):
-                x_quantized = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)
+                x_quantized = torch.quantize_per_tensor(
+                    x, 1.0, 0, torch.quint8)
                 conv = self.conv(x_quantized)
                 return torch.dequantize(conv).contiguous()

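Note: the hunk above only reflows the forward() of the inner test module; the rest of create_model is unchanged and not part of this diff. For orientation, a minimal self-contained sketch of the same pattern (quantize the input, run a quantized conv, dequantize), assuming a recent PyTorch where torch.ao.nn.quantized.Conv2d is available. It is not the test's actual create_model; the class name and default arguments are illustrative only.

import torch

# Hedged sketch, not the test's create_model: packed weights are left at
# their default initialization, while the real test builds them from
# weights_shape. Input quantization params (1.0, 0, quint8) match the hunk.
class QuantizedConvSketch(torch.nn.Module):
    def __init__(self, weights_shape=(2, 3, 3, 3), scale=1.0, zero_point=0,
                 strides=1, pads=0, dilations=1, groups=1):
        super().__init__()
        out_ch, in_ch_per_group, kh, kw = weights_shape
        self.conv = torch.ao.nn.quantized.Conv2d(
            in_ch_per_group * groups, out_ch, (kh, kw),
            stride=strides, padding=pads, dilation=dilations, groups=groups)
        self.conv.scale = float(scale)          # output requantization scale
        self.conv.zero_point = int(zero_point)  # as set in the hunk above

    def forward(self, x):
        x_quantized = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)
        conv = self.conv(x_quantized)
        return torch.dequantize(conv).contiguous()


model = QuantizedConvSketch()
print(model(torch.randn(1, 3, 10, 10)).shape)  # torch.Size([1, 2, 8, 8])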
@@ -54,18 +55,26 @@ class TestQuantizedConv2D(PytorchLayerTest):
         "params",
         [
             pytest.param(
-                {"weights_shape": [1, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
+                {"weights_shape": [1, 3, 3, 3], "strides": 1,
+                    "pads": 0, "dilations": 1, "groups": 1},
                 marks=pytest.mark.xfail(
                     reason="Output channels equal to 1 creates output that fails to cast to contiguous."
                 ),
             ),
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 2, "pads": 0, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 1, "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": 0, "dilations": 2, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [0, 1], "dilations": 1, "groups": 1},
-            {"weights_shape": [2, 3, 3, 3], "strides": 1, "pads": [1, 0], "dilations": 1, "groups": 1},
-            {"weights_shape": [3, 1, 3, 3], "strides": 1, "pads": 0, "dilations": 1, "groups": 3},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 2,
+                "pads": 0, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 1, "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 2, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": [0, 1], "dilations": 1, "groups": 1},
+            {"weights_shape": [2, 3, 3, 3], "strides": 1,
+                "pads": [1, 0], "dilations": 1, "groups": 1},
+            {"weights_shape": [3, 1, 3, 3], "strides": 1,
+                "pads": 0, "dilations": 1, "groups": 3},
         ],
     )
     @pytest.mark.parametrize("bias", [True, False])
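The reformatted params above re-wrap the same stride/pad/dilation/groups combinations; no configuration was added or removed. As a quick reference for how those knobs affect the output size, here is the standard convolution arithmetic (not specific to this test suite):

# Output size along one spatial dimension for a conv with the given settings.
def conv_out_size(in_size, kernel, stride=1, pad=0, dilation=1):
    return (in_size + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

# e.g. the {"strides": 2, "pads": 0, "dilations": 1} case with a 3x3 kernel
# on a 10x10 input yields a 4x4 output; the dilation-2 case yields 6x6.
assert conv_out_size(10, 3, stride=2) == 4
assert conv_out_size(10, 3, dilation=2) == 6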
@@ -73,13 +82,10 @@ class TestQuantizedConv2D(PytorchLayerTest):
     @pytest.mark.parametrize("scale", [1, 0.3, 1.3])
     @pytest.mark.parametrize("zero_point", [0, 1])
     @pytest.mark.nightly
-    # @pytest.mark.precommit Test disabled due to sporadic issues
+    @pytest.mark.precommit
     def test_quantized_conv2d(self, params, bias, relu, scale, zero_point, ie_device, precision, ir_version):
         self._test(
-            *self.create_model(**params, bias=bias, relu=relu, scale=scale, zero_point=zero_point),
-            ie_device,
-            precision,
-            ir_version,
-            trace_model=True,
-            freeze_model=False
+            *self.create_model(**params, bias=bias, relu=relu,
+                               scale=scale, zero_point=zero_point),
+            ie_device, precision, ir_version, trace_model=True, freeze_model=False, quantized_ops=True, quant_size=scale
         )
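The functional change in this hunk, and in the linear hunks below, is the switch from the commented-out precommit marker to @pytest.mark.precommit plus the new quantized_ops=True / quant_size=scale arguments to self._test. How the PytorchLayerTest base class consumes those arguments is not part of this diff; a plausible reading, sketched here purely for illustration with placeholder names, is that the output comparison tolerance is tied to the quantization step rather than a tight float epsilon:

import numpy as np

# Illustrative only: not the base-class API. Assumes a tolerance of roughly
# one quantization step per element when quantized_ops is enabled.
def assert_quantized_close(expected, actual, quant_size, steps=1):
    np.testing.assert_allclose(actual, expected, rtol=0,
                               atol=steps * float(quant_size))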
@@ -72,13 +72,13 @@ class TestQuantizedLinear(PytorchLayerTest):
     @pytest.mark.parametrize("zero_point", [0, 1])
     @pytest.mark.parametrize("trace", [True, False])
     @pytest.mark.nightly
-    # @pytest.mark.precommit Test disabled due to sporadic issues
+    @pytest.mark.precommit
     def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, precision, ir_version):
         input_shape = params.get("input_shape")
         weight_shape = params.get("weight_shape")
         bias = params.get("bias", False)
         self._test(*self.create_model(weight_shape, bias, scale, zero_point), ie_device, precision, ir_version,
-                   kwargs_to_prepare_input={"input_shape": input_shape}, trace_model=trace, freeze_model=False)
+                   kwargs_to_prepare_input={"input_shape": input_shape}, trace_model=trace, freeze_model=False, quantized_ops=True, quant_size=scale)

     @pytest.mark.parametrize("trace", [True, False])
     @pytest.mark.parametrize("inplace", [True, False])
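For the linear tests, create_model and create_hardtanh_model themselves are unchanged and not shown in this diff. A minimal sketch of the quantize / quantized-linear / dequantize pattern they exercise, assuming torch.ao.nn.quantized.Linear with default-initialized packed weights (class name and defaults are illustrative, not the test's code):

import torch

# Hedged sketch, not the test's create_model: fixed input quantization
# params, output scale / zero_point set as plain attributes.
class QuantizedLinearSketch(torch.nn.Module):
    def __init__(self, in_features=9, out_features=10, scale=1.0, zero_point=0):
        super().__init__()
        self.linear = torch.ao.nn.quantized.Linear(in_features, out_features)
        self.linear.scale = float(scale)
        self.linear.zero_point = int(zero_point)

    def forward(self, x):
        x_quantized = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)
        return torch.dequantize(self.linear(x_quantized))


# Shapes mirror the hardtanh test below: weight [10, 9], input [2, 3, 9].
print(QuantizedLinearSketch(9, 10)(torch.randn(2, 3, 9)).shape)  # [2, 3, 10]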
@@ -86,4 +86,4 @@ class TestQuantizedLinear(PytorchLayerTest):
     @pytest.mark.precommit
     def test_quantized_hardtanh_linear(self, trace, inplace, ie_device, precision, ir_version):
         self._test(*self.create_hardtanh_model([10, 9], True, 1, 0.3, inplace), ie_device, precision, ir_version,
-                   kwargs_to_prepare_input={"input_shape": [2, 3, 9]}, trace_model=trace, freeze_model=False)
+                   kwargs_to_prepare_input={"input_shape": [2, 3, 9]}, trace_model=trace, freeze_model=False, quantized_ops=True, quant_size=0.3)