Disable LWT tests (#12740) (#12763)

(cherry picked from commit 3980672082)
Nikita Malinin 2022-08-26 10:38:44 +02:00 committed by GitHub
parent 0266f25f9b
commit f2d6248dec


@@ -52,6 +52,7 @@ def run_algo(model, model_name, algorithm_config, tmp_path, reference_name):
 @pytest.mark.parametrize('model_params', SPARSITY_MODELS,
                          ids=['{}_{}_sparse_tuning'.format(m[0], m[1]) for m in SPARSITY_MODELS])
 def test_sparsity_with_finetuning_algo(models, tmp_path, model_params):
+    pytest.skip()
     model_name, model_framework, algo_name, preset, sparsity_level, expected_accuracy = model_params
     if not TORCH_AVAILABLE:
@@ -91,6 +92,7 @@ QUANTIZATION_MODELS = [
 @pytest.mark.parametrize('model_params', QUANTIZATION_MODELS,
                          ids=['{}_{}_quantize_tuned'.format(m[0], m[1]) for m in QUANTIZATION_MODELS])
 def test_quantization_with_finetuning_algo(models, tmp_path, model_params):
+    pytest.skip()
     model_name, model_framework, algo_name, preset, expected_accuracy = model_params
     if not TORCH_AVAILABLE:
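
Note: pytest.skip() also accepts an optional reason string, which keeps disabled tests self-documenting in test reports. A minimal sketch of that variant (the reason text below is illustrative only; the commit itself calls pytest.skip() with no arguments):

import pytest

def test_sparsity_with_finetuning_algo(models, tmp_path, model_params):
    # Hypothetical reason string, citing the tracking issue from the commit title.
    pytest.skip('LWT tests temporarily disabled (#12740)')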