Mark models according to last validation and fix some problems in models (#20324)

* Mark models according to last validation and fix some problems in models

* Update tests/model_hub_tests/torch_tests/hf_transformers_models
Maxim Vafin 2023-10-10 11:23:23 +02:00 committed by GitHub
parent b630bffa14
commit 0fec05ecf2
5 changed files with 26 additions and 15 deletions

File: tests/model_hub_tests/torch_tests/detectron2_models

@@ -1,4 +1,4 @@
-COCO-Detection/fast_rcnn_R_50_FPN_1x,none
+COCO-Detection/fast_rcnn_R_50_FPN_1x,none,xfail,Tracing problem
 COCO-Detection/faster_rcnn_R_101_C4_3x,none
 COCO-Detection/faster_rcnn_R_101_DC5_3x,none
 COCO-Detection/faster_rcnn_R_101_FPN_3x,none
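Each line in these model lists is a comma-separated record: model name, model type, and an optional mark ("skip" or "xfail") followed by a reason; the commit updates the marks to match the latest validation run. A minimal sketch of a parser for this format, assuming get_models_list (used by the tests below) yields (name, type, mark, reason) tuples; the real helper in the repository may differ:

    # Hypothetical re-implementation of the list parser, for illustration only.
    def get_models_list(path):
        models = []
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                # name,type[,mark[,reason]] -- the reason may itself contain commas
                parts = line.split(",", 3)
                name, model_type = parts[0], parts[1]
                mark = parts[2] if len(parts) > 2 else None
                reason = parts[3] if len(parts) > 3 else None
                models.append((name, model_type, mark, reason))
        return models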

File: tests/model_hub_tests/torch_tests/hf_transformers_models

@@ -3,7 +3,7 @@ abcp4/mymodel-test,mymodel,skip,Load problem
 abeja/gpt-neox-japanese-2.7b,gpt_neox_japanese
 acl-submission-anonym/EAM-spectral,examuse,skip,Load problem
 adalbertojunior/modular-test,modular,skip,Load problem
-aerner/lm-v2,open-llama
+aerner/lm-v2,open-llama,xfail,Example input problem
 afonsosamarques/ardt-vanilla-combo_train_hopper_v2-2508_1336-33,decision_transformer,xfail,Tracing problem
 aihijo/gec-zh-gector-bert-large,gector,skip,Load problem
 albert-base-v2,albert
@@ -21,7 +21,7 @@ ArthurZ/jukebox-vqvae,jukebox_vqvae,skip,Load problem
 ArthurZ/persimmon-8b-base,persimmon,skip,Load problem
 ashishpatel26/span-marker-bert-base-fewnerd-coarse-super,span-marker,skip,Load problem
 asi/albert-act-tiny,albert_act,skip,Load problem
-BAAI/AltCLIP,altclip,xfail,Unsupported op aten::numpy_T
+BAAI/AltCLIP,altclip
 BAAI/AquilaCode-py,aquila,skip,Load problem
 bana513/opennmt-translator-en-hu,opennmt-translator,skip,Load problem
 benjamin/wtp-bert-mini,bert-char,skip,Load problem
@@ -79,7 +79,7 @@ facebook/musicgen-small,musicgen,skip,Load problem
 facebook/opt-125m,opt
 facebook/rag-token-nq,rag,skip,Load problem
 facebook/sam-vit-large,sam,xfail,No node with name original_sizes
-facebook/timesformer-base-finetuned-k400,timesformer,xfail,Shape inference of Add node failed: Eltwise shape infer input shapes dim index: 1 mismatch
+facebook/timesformer-base-finetuned-k400,timesformer
 facebook/vit-mae-base,vit_mae,xfail,Accuracy validation failed
 facebook/wmt19-ru-en,fsmt,xfail,Tracing problem
 facebook/xlm-roberta-xl,xlm-roberta-xl
@@ -199,7 +199,7 @@ kiddothe2b/hierarchical-transformer-base-4096-v2,hat,skip,Load problem
 k-l-lambda/clip-text-generator,clip_text_generator,skip,Load problem
 k-l-lambda/stable-diffusion-v1-4-inv-embed,inv_word_embed,skip,Load problem
 KoboldAI/fairseq-dense-13B-Janeway,xglm,skip,Large Model
-konverner/qdq-camembert-apolliner,qdqbert
+konverner/qdq-camembert-apolliner,qdqbert,xfail,Repository not found
 krasserm/perceiver-ar-clm-base,perceiver-ar-causal-language-model,skip,Load problem
 krasserm/perceiver-ar-sam-giant-midi,perceiver-ar-symbolic-audio-model,skip,Load problem
 krasserm/perceiver-io-img-clf,perceiver-io-image-classifier,skip,Load problem
@@ -309,7 +309,7 @@ regisss/bridgetower-newyorker-a100-8x,bridgetower
 rinna/japanese-cloob-vit-b-16,cloob,skip,Load problem
 Rocketknight1/tiny-random-falcon-7b,falcon
 RUCAIBox/mass-base-uncased,mass,skip,Load problem
-RWKV/rwkv-4-169m-pile,rwkv,xfail,Unsupported op aten::maximum
+RWKV/rwkv-4-169m-pile,rwkv
 sahasrarjn/interbert,BERT,skip,Load problem
 saibo/genkalm-medium-gpt2,genkalm,skip,Load problem
 SajjadAyoubi/clip-fa-vision,clip_vision_model
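The two marks serve different purposes: "skip" entries (mostly Load problem or Large Model) are not executed at all, while "xfail" entries are run and expected to fail for the recorded reason, so a fixed model shows up as an unexpected pass. A hedged sketch of how both marks could be turned into pytest parameters; the test changes below only show the xfail branch, and the repository's actual skip handling may differ:

    import pytest

    def to_params(models):
        # models: iterable of (name, type, mark, reason) tuples
        params = []
        for name, _, mark, reason in models:
            if mark == "xfail":
                # executed, expected to fail for the recorded reason
                params.append(pytest.param(name, marks=pytest.mark.xfail(reason=reason)))
            elif mark == "skip":
                # not executed at all
                params.append(pytest.param(name, marks=pytest.mark.skip(reason=reason)))
            else:
                params.append(name)
        return params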

File: Detectron2 conversion test (TestDetectron2ConvertModel)

@@ -20,7 +20,7 @@ class TestDetectron2ConvertModel(TestConvertModel):
     def load_model(self, model_name, model_link):
         from detectron2 import model_zoo, export
-        from detectron2.modeling import build_model
+        from detectron2.modeling import build_model, PanopticFPN
         from detectron2.checkpoint import DetectionCheckpointer
         from detectron2.config import CfgNode
         import torchvision.transforms as transforms
@@ -32,13 +32,23 @@ class TestDetectron2ConvertModel(TestConvertModel):
         assert isinstance(cfg, CfgNode), "Unexpected config"
         cfg.MODEL.DEVICE = "cpu"
         model = build_model(cfg)
-        DetectionCheckpointer(model, save_to_disk=False).load(cfg.MODEL.WEIGHTS)
+        DetectionCheckpointer(
+            model, save_to_disk=False).load(cfg.MODEL.WEIGHTS)
         model.eval()
         inputs = [{"image": image,
                    "height": torch.tensor(image.shape[1]),
                    "width": torch.tensor(image.shape[2])}]
-        adapter = export.TracingAdapter(model, inputs)
+        # https://github.com/facebookresearch/detectron2/blob/4e80df1e58901557e2824ce3b488d30209a9be33/tools/deploy/export_model.py#L123
+        # This is done only for Panoptic models, but it may be incorrect to do that, because one of outputs of panoptic model is getting lost
+        if isinstance(model, PanopticFPN):
+            def inference(model, inputs):
+                # use do_postprocess=False so it returns ROI mask
+                inst = model.inference(inputs, do_postprocess=False)[0]
+                return [{"instances": inst}]
+        else:
+            inference = None  # assume that we just call the model directly
+        adapter = export.TracingAdapter(model, inputs, inference)
         self.example = adapter.flattened_inputs
         return adapter
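The TracingAdapter returned here behaves like a regular torch.nn.Module over the flattened tensor inputs stored in self.example, which is what allows the base class to trace and convert it. A minimal sketch of that conversion step, under the assumption that TestConvertModel does something along these lines (the actual base-class code is not part of this diff):

    import openvino as ov

    # Hypothetical conversion helper for illustration; convert_adapter is not
    # a function from this repository.
    def convert_adapter(adapter, example):
        # TracingAdapter exposes forward() over flattened tensors, so it can be
        # converted like any other torch.nn.Module given an example input.
        return ov.convert_model(adapter, example_input=example)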
@@ -75,7 +85,8 @@ class TestDetectron2ConvertModel(TestConvertModel):
             cur_fw_res = fw_outputs[i]
             cur_ov_res = ov_outputs[i]
             l = min(len(cur_fw_res), len(cur_ov_res))
-            assert l > 0 or len(cur_fw_res) == len(cur_ov_res), "No boxes were selected."
+            assert l > 0 or len(cur_fw_res) == len(
+                cur_ov_res), "No boxes were selected."
             print(f"fw_re: {cur_fw_res};\n ov_res: {cur_ov_res}")
             is_ok = compare_two_tensors(cur_ov_res[:l], cur_fw_res[:l], fw_eps)
             assert is_ok, "Accuracy validation failed"
@@ -86,8 +97,8 @@ class TestDetectron2ConvertModel(TestConvertModel):
     def test_detectron2_precommit(self, name, type, mark, reason, ie_device):
         self.run(name, None, ie_device)

-    @pytest.mark.parametrize("name,type,mark,reason",
-                             get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models")))
+    @pytest.mark.parametrize("name",
+                             [pytest.param(n, marks=pytest.mark.xfail(reason=r)) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models"))])
     @pytest.mark.nightly
-    def test_detectron2_all_models(self, name, type, mark, reason, ie_device):
+    def test_detectron2_all_models(self, name, ie_device):
         self.run(name, None, ie_device)
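Passing reason=r to pytest.mark.xfail records why a model is expected to fail, so the report shows that reason instead of a bare XFAIL. A small self-contained illustration of the mechanism (the model names and the MODELS list are made up for the example):

    import pytest

    MODELS = [
        ("model-a", None, None),
        ("model-b", "xfail", "Tracing problem"),
    ]

    @pytest.mark.parametrize(
        "name",
        [pytest.param(n, marks=pytest.mark.xfail(reason=r)) if m == "xfail" else n
         for n, m, r in MODELS])
    def test_convert(name):
        # "model-b" is still executed; its failure is reported as xfailed
        # together with the reason string.
        assert name == "model-a"

Running pytest with -rxX then lists the xfailed and xpassed entries together with their reasons in the short test summary.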

File: torchvision/torch.hub conversion test (TestTorchHubConvertModel)

@@ -114,7 +114,7 @@ class TestTorchHubConvertModel(TestConvertModel):
         self.run(model_name, None, ie_device)

     @pytest.mark.parametrize("name",
-                             [pytest.param(n, marks=pytest.mark.xfail) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "torchvision_models"))])
+                             [pytest.param(n, marks=pytest.mark.xfail(reason=r)) if m == "xfail" else n for n, _, m, r in get_models_list(os.path.join(os.path.dirname(__file__), "torchvision_models"))])
     @pytest.mark.nightly
     def test_convert_model_all_models(self, name, ie_device):
         self.run(name, None, ie_device)