[tests] resolves skipped HF tests: 3rd batch (#21678)

* resolves skipped HF tests: 3rd batch

* remove comments

* minor corrections

* replace example input *args -> **kwargs

* use config instead of dict in example input

* add kwargs
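A minimal sketch of what the *args -> **kwargs switch means when building example inputs for tracing; the tiny model id and shapes below are illustrative, not taken from this commit:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")

# Positional (*args) style: tensors must line up with forward()'s parameter
# order, so any skipped optional argument shifts the ones after it.
example_args = (torch.ones(1, 8, dtype=torch.long),)
traced = torch.jit.trace(model, example_inputs=example_args, strict=False)

# Keyword (**kwargs) style: tensors are bound by parameter name, which is
# why the per-model branches in the test build `example` as a dict.
example_kwargs = {"input_ids": torch.ones(1, 8, dtype=torch.long),
                  "attention_mask": torch.ones(1, 8, dtype=torch.long)}
traced = torch.jit.trace(model, example_kwarg_inputs=example_kwargs, strict=False)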
Pavel Esir 2023-12-15 16:58:12 +01:00 committed by GitHub
parent c087fb4206
commit 137180bce5
2 changed files with 43 additions and 7 deletions


@@ -252,14 +252,14 @@ microsoft/speecht5_tts,speecht5,xfail,Tracing error: hangs with no error (probab
 microsoft/swinv2-tiny-patch4-window8-256,swinv2
 microsoft/table-transformer-detection,table-transformer
 microsoft/wavlm-large,wavlm,skip,Load problem
-microsoft/xclip-base-patch32,xclip,skip,Load problem
+microsoft/xclip-base-patch32,xclip
 microsoft/xprophetnet-large-wiki100-cased,xlm-prophetnet
 miguelvictor/python-fromzero-lstmlm,lstmlm,skip,Load problem
 mingzi151/test-hf-wav2vec2bert,wav2vec2bert,skip,Load problem
-MIT/ast-finetuned-audioset-10-10-0.4593,audio-spectrogram-transformer,skip,Load problem
+MIT/ast-finetuned-audioset-10-10-0.4593,audio-spectrogram-transformer
 Mizuiro-sakura/luke-japanese-large-sentiment-analysis-wrime,luke
 mlml-chip/thyme2_colon_e2e,cnlpt,skip,Load problem
-mnaylor/mega-base-wikitext,mega,skip,Load problem
+mnaylor/mega-base-wikitext,mega,xfail,Tracing error: Please check correctness of provided example_input (but eval was correct)
 mohitsha/tiny-random-testing-bert2gpt2,encoder-decoder
 MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli,deberta-v2
 MoritzLaurer/ernie-m-large-mnli-xnli,ernie_m
@@ -267,7 +267,7 @@ mrm8488/prunebert-base-uncased-finepruned-topK-squadv2,masked_bert,skip,Load pro
 muditb/headline_classifier,BertModel,skip,Load problem
 nanashi161382/clip-text-deprojector,clip_text_deprojector_model,skip,Load problem
 nateraw/vit-age-classifier,vit
-naver-clova-ocr/bros-base-uncased,bros,skip,Load problem
+naver-clova-ocr/bros-base-uncased,bros
 navervision/CompoDiff-Aesthetic,CompoDiff,skip,Load problem
 navervision/KELIP,kelip,skip,Load problem
 NCAI/NCAI-BERT,lean_albert,skip,Load problem
@@ -292,7 +292,7 @@ openai/jukebox-1b-lyrics,jukebox,skip,Load problem
 openai/whisper-medium,whisper,skip,Load problem
 openai-gpt,openai-gpt
 OpenAssistant/oasst-rm-2-pythia-6.9b-epoch-1,gpt_neox_reward_model,skip,Load problem
-openmmlab/upernet-convnext-small,upernet,skip,Load problem
+openmmlab/upernet-convnext-small,upernet
 openMUSE/clip-vit-large-patch14-text-enc,clip_text_model,skip,Load problem
 OpenVINO/opt-125m-gptq,opt
 PatrickHaller/ngme-llama-264M,ngme,skip,Load problem
@@ -325,7 +325,7 @@ sciki/finetune_tinybert,finetune-tinybert,skip,Load problem
 sebastian-hofstaetter/colbert-distilbert-margin_mse-T2-msmarco,ColBERT,skip,Load problem
 sebastian-hofstaetter/distilbert-cat-margin_mse-T2-msmarco,BERT_Cat,skip,Load problem
 sebastian-hofstaetter/idcm-distilbert-msmarco_doc,IDCM,skip,Load problem
-SenseTime/deformable-detr,deformable_detr,skip,Load problem
+SenseTime/deformable-detr,deformable_detr,xfail,Tracing error: Please check correctness of provided example_input (but eval was correct)
 shahules786/Reward-model-gptneox-410M,rm_gptneox_config,skip,Load problem
 shauray/Llava-Llama-2-7B-hf,llavallama,skip,Load problem
 shauray/ViTPose,vitpose,skip,Load problem
@@ -335,7 +335,7 @@ shikhartuli/flexibert-mini,flexibert,skip,Load problem
 shikras/shikra-7b-delta-v1-0708,shikra,skip,Load problem
 shi-labs/dinat-mini-in1k-224,dinat,xfail,Accuracy validation failed
 shi-labs/nat-mini-in1k-224,nat,xfail,Accuracy validation failed
-shi-labs/oneformer_ade20k_swin_large,oneformer,skip,Load problem
+shi-labs/oneformer_ade20k_swin_large,oneformer,xfail,Tracing error: Please check correctness of provided example_input (but eval was correct)
 shuqi/seed-encoder,seed_encoder,skip,Load problem
 sijunhe/nezha-cn-base,nezha
 sjiang1/codecse,roberta_for_cl,skip,Load problem
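For orientation, a hedged sketch of how a marks file like this is usually consumed; load_marks is a hypothetical reader, not the repo's actual harness. The practical difference between the two marks: skip never runs the test, while xfail still runs it and records the known failure, so moving an entry from skip to xfail is a step toward full coverage.

import pytest

def load_marks(path):
    # One model per line: "model id,model type[,mark,reason]" where mark is
    # "skip" or "xfail"; the reason text may itself contain commas, hence
    # maxsplit=3.
    params = []
    with open(path) as f:
        for line in f:
            parts = line.rstrip("\n").split(",", 3)
            marks = ()
            if len(parts) > 2:
                reason = parts[3] if len(parts) > 3 else ""
                factory = {"skip": pytest.mark.skip,
                           "xfail": pytest.mark.xfail}.get(parts[2])
                if factory is not None:
                    marks = (factory(reason=reason),)
            params.append(pytest.param(parts[0], parts[1], marks=marks))
    return params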


@@ -125,6 +125,42 @@ class TestTransformersModel(TestTorchConvertModel):
             preprocessor = CLIPFeatureExtractor.from_pretrained(name)
             encoded_input = preprocessor(self.image, return_tensors='pt')
             example = dict(encoded_input)
+        elif 'xclip' in mi.tags:
+            from transformers import XCLIPVisionModel
+
+            model = XCLIPVisionModel.from_pretrained(name, **model_kwargs)
+            # needs video as input
+            example = {'pixel_values': torch.randn(*(16, 3, 224, 224), dtype=torch.float32)}
+        elif 'audio-spectrogram-transformer' in mi.tags:
+            example = {'input_values': torch.randn(*(1, 1024, 128), dtype=torch.float32)}
+        elif 'mega' in mi.tags:
+            from transformers import AutoModel
+
+            model = AutoModel.from_pretrained(name, **model_kwargs)
+            model.config.output_attentions = True
+            model.config.output_hidden_states = True
+            model.config.return_dict = True
+            example = dict(model.dummy_inputs)
+        elif 'bros' in mi.tags:
+            from transformers import AutoProcessor, AutoModel
+
+            processor = AutoProcessor.from_pretrained(name)
+            model = AutoModel.from_pretrained(name, **model_kwargs)
+            encoding = processor("to the moon!", return_tensors="pt")
+            bbox = torch.randn([1, 6, 8], dtype=torch.float32)
+            example = dict(input_ids=encoding["input_ids"], bbox=bbox, attention_mask=encoding["attention_mask"])
+        elif 'upernet' in mi.tags:
+            from transformers import AutoProcessor, UperNetForSemanticSegmentation
+
+            processor = AutoProcessor.from_pretrained(name)
+            model = UperNetForSemanticSegmentation.from_pretrained(name, **model_kwargs)
+            example = dict(processor(images=self.image, return_tensors="pt"))
+        elif 'deformable_detr' in mi.tags or 'universal-image-segmentation' in mi.tags:
+            from transformers import AutoProcessor, AutoModel
+
+            processor = AutoProcessor.from_pretrained(name)
+            model = AutoModel.from_pretrained(name, **model_kwargs)
+            example = dict(processor(images=self.image, task_inputs=["semantic"], return_tensors="pt"))
         elif "t5" in mi.tags:
             from transformers import T5Tokenizer
             tokenizer = T5Tokenizer.from_pretrained(name)
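Downstream, an example dict like the ones built above is handed to conversion as keyword inputs. A minimal sketch assuming the public ov.convert_model API; the harness wiring around it is not shown in this diff:

import openvino as ov
import torch
from transformers import XCLIPVisionModel

# Mirrors the xclip branch above: a dict example input gives the tracer
# realistic names, shapes and dtypes for the video tensor.
model = XCLIPVisionModel.from_pretrained("microsoft/xclip-base-patch32")
example = {"pixel_values": torch.randn(16, 3, 224, 224, dtype=torch.float32)}
ov_model = ov.convert_model(model, example_input=example)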