[PT FE] Add torchbenchmark models to GHA tests (#21401)

* [PT FE] Add torchbenchmark models to GHA tests

* Fix teardown

* Fix requirements

* Update tests/model_hub_tests/torch_tests/torchbench_models

* Update tests/model_hub_tests/torch_tests/torchbench_models

* Update tests/model_hub_tests/torch_tests/test_torchbench.py
Author: Maxim Vafin, 2023-12-01 09:44:12 +01:00, committed by GitHub
Parent: 9ecebdd202
Commit: db6aeb7a65
3 changed files with 159 additions and 0 deletions

tests/model_hub_tests/torch_tests/requirements.txt

@@ -16,6 +16,7 @@ soundfile
super-image
timm
torch
torchaudio
torchvision
transformers
wheel

tests/model_hub_tests/torch_tests/test_torchbench.py (new file)

@@ -0,0 +1,59 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import sys
import subprocess
import pytest
import torch
import tempfile
from torch_utils import process_pytest_marks, get_models_list, TestTorchConvertModel

# To make tests reproducible we seed the random generator
torch.manual_seed(0)


class TestTorchbenchmarkConvertModel(TestTorchConvertModel):
    _model_list_path = os.path.join(
        os.path.dirname(__file__), "torchbench_models")

    def setup_class(self):
        super().setup_class(self)
        # The sd model doesn't need a token, but torchbench needs it to be specified
        os.environ['HUGGING_FACE_HUB_TOKEN'] = 'x'
        torch.set_grad_enabled(False)

        self.repo_dir = tempfile.TemporaryDirectory()
        os.system(
            f"git clone https://github.com/pytorch/benchmark.git {self.repo_dir.name}")
        subprocess.check_call(
            ["git", "checkout", "850364ac2678b2363f086b7549254b6cb7df2e4d"], cwd=self.repo_dir.name)
        m_list = get_models_list(self._model_list_path)
        m_processed_list = [m for m, _, mark, _ in m_list if mark != "skip"]
        subprocess.check_call(
            [sys.executable, "install.py"] + m_processed_list, cwd=self.repo_dir.name)

    def load_model(self, model_name, model_link):
        sys.path.append(self.repo_dir.name)
        from torchbenchmark import load_model_by_name
        try:
            model_cls = load_model_by_name(
                model_name)("eval", "cpu", jit=False)
        except:
            model_cls = load_model_by_name(model_name)("eval", "cpu")
        model, self.example = model_cls.get_module()
        self.inputs = self.example
        # initialize selected models
        if model_name in ["BERT_pytorch", "yolov3"]:
            model(*self.example)
        return model

    def teardown_class(self):
        # cleanup tmpdir
        self.repo_dir.cleanup()

    @pytest.mark.parametrize("name", process_pytest_marks(_model_list_path))
    @pytest.mark.nightly
    def test_convert_model_all_models(self, name, ie_device):
        self.run(name, None, ie_device)
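
Note: get_models_list and process_pytest_marks are imported from torch_utils and their implementations are not part of this diff. The usage above implies that each non-comment line of torchbench_models has the form name,link[,mark[,reason]]. A minimal, illustrative sketch of helpers with that behaviour (the real torch_utils code may differ) is:

import pytest


def get_models_list(path):
    # Parse "name,link[,mark[,reason]]" records, skipping blanks and "#" comments.
    models = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            name, link, mark, reason = (line.split(",", 3) + [None] * 3)[:4]
            models.append((name, link, mark, reason))
    return models


def process_pytest_marks(path):
    # Turn "xfail"/"skip" marks into pytest.param marks, so that, e.g., the line
    # "demucs,None,xfail,Unsupported op aten::lstm" becomes an expected failure.
    params = []
    for name, _, mark, reason in get_models_list(path):
        if mark == "xfail":
            params.append(pytest.param(name, marks=pytest.mark.xfail(reason=reason)))
        elif mark == "skip":
            params.append(pytest.param(name, marks=pytest.mark.skip(reason=reason)))
        else:
            params.append(name)
    return params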

tests/model_hub_tests/torch_tests/torchbench_models (new file)

@@ -0,0 +1,99 @@
BERT_pytorch,None
Background_Matting,None
#DALLE2_pytorch,None - Not Supported on CPU
LearningToPaint,None
Super_SloMo,None,xfail,Unsupported ops aten::l1_loss aten::mse_loss
#alexnet,None - Already tested by torchvision tests
basic_gnn_edgecnn,None,xfail,Accuracy validation failed
basic_gnn_gcn,None,xfail,Unsupported ops aten::pow_ aten::scatter_add_
basic_gnn_gin,None,xfail,Unsupported op aten::scatter_add_
basic_gnn_sage,None,xfail,Unsupported op aten::scatter_add_
#cm3leon_generate,None,skip,No install.py is found
dcgan,None
demucs,None,xfail,Unsupported op aten::lstm
#densenet121,None - Already tested by torchvision tests
#detectron2_fasterrcnn_r_101_c4,None - Already tested by det2 tests
#detectron2_fasterrcnn_r_101_dc5,None - Already tested by det2 tests
#detectron2_fasterrcnn_r_101_fpn,None - Already tested by det2 tests
#detectron2_fasterrcnn_r_50_c4,None - Already tested by det2 tests
#detectron2_fasterrcnn_r_50_dc5,None - Already tested by det2 tests
#detectron2_fasterrcnn_r_50_fpn,None - Already tested by det2 tests
#detectron2_fcos_r_50_fpn,None - Already tested by det2 tests
#detectron2_maskrcnn,None - Already tested by det2 tests
#detectron2_maskrcnn_r_101_c4,None - Already tested by det2 tests
#detectron2_maskrcnn_r_101_fpn,None - Already tested by det2 tests
#detectron2_maskrcnn_r_50_c4,None - Already tested by det2 tests
#detectron2_maskrcnn_r_50_fpn,None - Already tested by det2 tests
dlrm,None,xfail,Conversion is failed for: prim::Constant - None
#doctr_det_predictor,None - No module named 'tf2onnx'
#doctr_reco_predictor,None - No module named 'tf2onnx'
drq,None,skip,Tracer cannot infer type of SquashedNormal()
fastNLP_Bert,None,skip,tuple expected at most 1 argument got 2
functorch_dp_cifar10,None
functorch_maml_omniglot,None
#hf_Albert,None - Already tested by hf tests
#hf_Bart,None - Already tested by hf tests
#hf_Bert,None - Already tested by hf tests
#hf_Bert_large,None - Already tested by hf tests
#hf_BigBird,None - Already tested by hf tests
#hf_DistilBert,None - Already tested by hf tests
#hf_GPT2,None - Already tested by hf tests
#hf_GPT2_large,None - Already tested by hf tests
#hf_Longformer,None - Already tested by hf tests
#hf_Reformer,None - Already tested by hf tests
#hf_T5,None - Already tested by hf tests
#hf_T5_base,None - Already tested by hf tests
#hf_T5_generate,None,skip,No install.py is found
#hf_T5_large,None - Already tested by hf tests
hf_Whisper,None
#hf_clip,None,skip,No install.py is found
lennard_jones,None
llama,None,skip,Type 'Tuple[Tensor int]' cannot be traced
llama_v2_7b_16h,None,skip,Tracer cannot infer type of CausalLMOutputWithPast
maml,None,skip,element 0 of tensors does not require grad and does not have a grad_fn
maml_omniglot,None
mnasnet1_0,None
#mobilenet_v2,None - Already tested by torchvision tests
mobilenet_v2_quantized_qat,None
#mobilenet_v3_large,None - Already tested by torchvision tests
#moco,None - DistributedDataParallel/allgather requires cuda
#nanogpt,None,skip,No install.py is found
nvidia_deeprecommender,None
opacus_cifar10,None,skip,Modules that have backward hooks assigned can't be compiled
phi_1_5,None
phlippe_densenet,None
phlippe_resnet,None
pyhpc_equation_of_state,None,xfail,Accuracy validation failed
pyhpc_isoneutral_mixing,None,xfail,Accuracy validation failed
pyhpc_turbulent_kinetic_energy,None,xfail,Unsupported op aten::empty_like
pytorch_CycleGAN_and_pix2pix,None
pytorch_stargan,None,xfail,CPU plugin error: Unsupported operation of type: BatchNormInference
pytorch_unet,None
#resnet152,None - Already tested by torchvision tests
#resnet18,None - Already tested by torchvision tests
#resnet50,None - Already tested by torchvision tests
resnet50_quantized_qat,None,xfail,Accuracy validation failed
#resnext50_32x4d,None - Already tested by torchvision tests
sam,None,xfail,Unexpected type of example_input
#shufflenet_v2_x1_0,None - Already tested by torchvision tests
#simple_gpt,None,skip,No install.py is found
#simple_gpt_tp_manual,None,skip,No install.py is found
soft_actor_critic,None,skip,Tracer cannot infer type of SquashedNormal
speech_transformer,None
#squeezenet1_1,None - Already tested by torchvision tests
stable_diffusion_text_encoder,None
stable_diffusion_unet,None
tacotron2,None,skip,Can't be loaded without CUDA
#timm_efficientdet,None - Already tested by timm tests
#timm_efficientnet,None - Already tested by timm tests
#timm_nfnet,None - Already tested by timm tests
#timm_regnet,None - Already tested by timm tests
#timm_resnest,None - Already tested by timm tests
#timm_vision_transformer,None - Already tested by timm tests
#timm_vision_transformer_large,None - Already tested by timm tests
#timm_vovnet,None - Already tested by timm tests
torch_multimodal_clip,None,skip,Can't be traced
tts_angular,None,xfail,Unsupported op aten::lstm
#vgg16,None - Already tested by torchvision tests
#vision_maskrcnn,None,skip,Only tensors, lists, tuples of tensors, or dictionary of tensors can be output from traced functions
yolov3,None
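
Under the sketch above, entries from this list would parametrize roughly as follows; names and reasons are taken verbatim from the file, the exact parameter objects depend on the real torch_utils helpers, and expected_style is only an illustrative name:

import pytest

expected_style = [
    "BERT_pytorch",  # plain entry: converted and validated on ie_device
    pytest.param("demucs",
                 marks=pytest.mark.xfail(reason="Unsupported op aten::lstm")),
    pytest.param("drq",
                 marks=pytest.mark.skip(reason="Tracer cannot infer type of SquashedNormal()")),
    # Lines starting with "#" (e.g. the torchvision, timm and hf models already
    # covered by other suites) are never parsed and never reach the test.
]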