[Hub Tests] Apply Python code formatting (#21286)
* Apply Python code formatting

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
commit 7c590bf180 (parent a7de95a8a4)
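The hunks below only regroup imports and re-wrap long lines; no behaviour changes. The commit message does not say which tool produced the layout, so the script below is only a sketch of how a comparable pass could be reproduced. The choice of isort plus autopep8, the 120-character limit, and the `tests/model_hub_tests` root are assumptions, not anything stated in this change.

# Hypothetical reproduction script -- the commit does not name the formatter
# that produced these hunks; isort + autopep8 with a 120-character limit is
# only an assumption that matches the resulting layout.
import pathlib

import autopep8  # assumed third-party dependency
import isort     # assumed third-party dependency


def format_tree(root: str) -> None:
    """Rewrite every .py file under `root` with grouped imports and PEP 8 spacing."""
    for path in pathlib.Path(root).rglob("*.py"):
        source = path.read_text(encoding="utf-8")
        source = isort.code(source)  # group and sort the import block
        source = autopep8.fix_code(source, options={"max_line_length": 120})  # spacing, wrapping
        path.write_text(source, encoding="utf-8")


if __name__ == "__main__":
    # Assumed location of the hub tests touched by this change.
    format_tree("tests/model_hub_tests")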
@@ -3,7 +3,6 @@
 
 import logging as log
 import os
 import platform
 import signal
 import sys
 import traceback
@@ -11,6 +10,7 @@ from multiprocessing import Process, Queue, TimeoutError, ProcessError
 from queue import Empty as QueueEmpty
 from typing import Callable, Union
 
+
 def _mp_wrapped_func(func: Callable, func_args: list, queue: Queue, logger_queue: Queue):
 """
 Wraps callable object with exception handling. Current wrapper is a target for
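For readers skimming the diff: per its docstring, `_mp_wrapped_func` wraps a test callable with exception handling and serves as the child-process target used by `multiprocessing_run` (imported from this module elsewhere in the diff), reporting the outcome through a `Queue`. The snippet below is only a generic sketch of that pattern under made-up names (`run_in_subprocess`, `_wrapped`); it is not the repository's implementation.

import traceback
from multiprocessing import Process, Queue
from queue import Empty as QueueEmpty
from typing import Callable


def _wrapped(func: Callable, args: list, queue: Queue):
    # Child-process target: push either the result or the formatted traceback.
    try:
        queue.put(("ok", func(*args)))
    except Exception:
        queue.put(("error", traceback.format_exc()))


def run_in_subprocess(func: Callable, args: list, timeout: float):
    """Hypothetical helper mirroring the multiprocessing_run idea."""
    queue = Queue()
    proc = Process(target=_wrapped, args=(func, args, queue))
    proc.start()
    try:
        status, payload = queue.get(timeout=timeout)
    except QueueEmpty:
        proc.terminate()
        raise TimeoutError(f"{func.__name__} exceeded {timeout} seconds")
    finally:
        proc.join()
    if status == "error":
        raise RuntimeError(payload)
    return payload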
@@ -4,16 +4,14 @@
-
 import sys
 import time
-from enum import Enum
 import traceback
-import pytest
-from openvino.runtime.utils.types import openvino_to_numpy_types_map
+from enum import Enum
 
 import numpy as np
-from models_hub_common.multiprocessing_utils import multiprocessing_run
 import openvino as ov
-
 # noinspection PyUnresolvedReferences
+import pytest
+from models_hub_common.multiprocessing_utils import multiprocessing_run
+from openvino.runtime.utils.types import openvino_to_numpy_types_map
 
 # set seed to have deterministic input data generation
 # to avoid sporadic issues in inference results
@@ -179,7 +177,7 @@ class TestModelPerformance:
 print('read model time infer {}'.format(read_model_time))
 print('read model time infer var {}'.format(read_model_time_variance))
 
-infer_time_ratio = converted_model_time/read_model_time
+infer_time_ratio = converted_model_time / read_model_time
 
 results.converted_infer_time = converted_model_time
 results.converted_model_time_variance = converted_model_time_variance
@@ -200,13 +198,14 @@ class TestModelPerformance:
 except:
 ex_type, ex_value, tb = sys.exc_info()
 results.error_message = "{tb}\n{ex_type}: {ex_value}".format(tb=''.join(traceback.format_tb(tb)),
-ex_type=ex_type.__name__, ex_value=ex_value)
+ex_type=ex_type.__name__, ex_value=ex_value)
 return results
 
 def run(self, model_name, model_link, ie_device):
 self.result = Results()
 t0 = time.time()
-self.result = multiprocessing_run(self._run, [model_name, model_link, ie_device], model_name, self.infer_timeout)
+self.result = multiprocessing_run(self._run, [model_name, model_link, ie_device], model_name,
+self.infer_timeout)
 t1 = time.time()
 print('test running time {}'.format(t1 - t0))
 if self.result.status == Status.OK:
@@ -1,11 +1,11 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+import itertools
 import os
 import shutil
-import itertools
-import numpy as np
 
+import numpy as np
 from models_hub_common.constants import test_device
 
 
@@ -2,10 +2,10 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import inspect
-import pytest
-from py.xml import html
 
+import pytest
 from models_hub_common.utils import get_params
+from py.xml import html
 
 
 def pytest_generate_tests(metafunc):
@@ -2,18 +2,16 @@
 # SPDX-License-Identifier: Apache-2.0
 
-
+import gc
 import os
 import shutil
 
-import gc
 import pytest
 import tensorflow_hub as hub
-# noinspection PyUnresolvedReferences
 
+from models_hub_common.constants import no_clean_cache_dir
+from models_hub_common.constants import tf_hub_cache_dir
 from models_hub_common.test_performance_model import TestModelPerformance
 from models_hub_common.utils import get_models_list
-from models_hub_common.constants import tf_hub_cache_dir
-from models_hub_common.constants import no_clean_cache_dir
 
 
 def clean_cache():
@@ -1,19 +1,20 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-import os
-import sys
 import math
-import tempfile
-import torch
-import pytest
 import os
 import subprocess
-from torch_utils import TestTorchConvertModel
-from openvino import convert_model, Model, PartialShape, Type
-import openvino.runtime.opset12 as ops
-from openvino.frontend import ConversionExtension
-import numpy as np
+import sys
+import tempfile
+
+import numpy as np
+import openvino.runtime.opset12 as ops
+import pytest
+import torch
+from openvino import convert_model, Model, PartialShape, Type
+from openvino.frontend import ConversionExtension
+
+from torch_utils import TestTorchConvertModel
 
 # To make tests reproducible we seed the random generator
 torch.manual_seed(0)
@@ -79,7 +80,7 @@ class TestAlikedConvertModel(TestTorchConvertModel):
 subprocess.check_call(
 ["git", "checkout", "6008af43942925eec7e32006814ef41fbd0858d8"], cwd=self.repo_dir.name)
 subprocess.check_call([sys.executable, "-m", "pip", "install",
-"-r", os.path.join(self.repo_dir.name, "requirements.txt")])
+"-r", os.path.join(self.repo_dir.name, "requirements.txt")])
 subprocess.check_call(["sh", "build.sh"], cwd=os.path.join(
 self.repo_dir.name, "custom_ops"))
 
@@ -2,11 +2,13 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+
 import pytest
 import torch
-from torch_utils import TestTorchConvertModel, process_pytest_marks
 from models_hub_common.utils import get_models_list, compare_two_tensors
 
+from torch_utils import TestTorchConvertModel, process_pytest_marks
+
 
 class TestDetectron2ConvertModel(TestTorchConvertModel):
 def setup_class(self):
@@ -2,12 +2,14 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+
 import pytest
 import torch
 from huggingface_hub import model_info
-from torch_utils import TestTorchConvertModel
-from models_hub_common.utils import cleanup_dir
 from models_hub_common.constants import hf_hub_cache_dir
+from models_hub_common.utils import cleanup_dir
 
+from torch_utils import TestTorchConvertModel
 from torch_utils import process_pytest_marks
 
 
@@ -219,13 +221,13 @@ class TestTransformersModel(TestTorchConvertModel):
 "Number of movies": ["87", "53", "69"]}
 queries = ["What is the name of the first actor?",
 "How many movies has George Clooney played in?",
-"What is the total number of movies?",]
+"What is the total number of movies?", ]
 answer_coordinates = [[(0, 0)], [(2, 1)], [
 (0, 1), (1, 1), (2, 1)]]
 answer_text = [["Brad Pitt"], ["69"], ["209"]]
 table = pd.DataFrame.from_dict(data)
 encoded_input = tokenizer(table=table, queries=queries, answer_coordinates=answer_coordinates,
-answer_text=answer_text, padding="max_length", return_tensors="pt",)
+answer_text=answer_text, padding="max_length", return_tensors="pt", )
 example = dict(input_ids=encoded_input["input_ids"],
 token_type_ids=encoded_input["token_type_ids"],
 attention_mask=encoded_input["attention_mask"])
@@ -277,7 +279,8 @@ class TestTransformersModel(TestTorchConvertModel):
 def test_convert_model_precommit(self, name, type, ie_device):
 self.run(model_name=name, model_link=type, ie_device=ie_device)
 
-@pytest.mark.parametrize("name", process_pytest_marks(os.path.join(os.path.dirname(__file__), "hf_transformers_models")))
+@pytest.mark.parametrize("name",
+process_pytest_marks(os.path.join(os.path.dirname(__file__), "hf_transformers_models")))
 @pytest.mark.nightly
 def test_convert_model_all_models(self, name, ie_device):
 self.run(model_name=name, model_link=None, ie_device=ie_device)
@@ -2,17 +2,15 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+import subprocess
 import sys
 import tempfile
-import torch
 
 import pytest
-import subprocess
+import torch
 
 from models_hub_common.test_convert_model import TestConvertModel
 from openvino import convert_model
 from torch_utils import TestTorchConvertModel
 
-
 # To make tests reproducible we seed the random generator
 torch.manual_seed(0)
@@ -39,8 +37,8 @@ class TestSpeechTransformerConvertModel(TestTorchConvertModel):
 torch.stack(sorted(torch.randint(55, 250, [32]), reverse=True)),
 torch.randint(-1, 4232, [32, 20]))
 self.inputs = (torch.randn(32, 209, 320),
-torch.stack(sorted(torch.randint(55, 400, [32]), reverse=True)),
-torch.randint(-1, 4232, [32, 25]))
+torch.stack(sorted(torch.randint(55, 400, [32]), reverse=True)),
+torch.randint(-1, 4232, [32, 25]))
 return m
 
 def infer_fw_model(self, model_obj, inputs):
@@ -2,19 +2,22 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+
+import pytest
 import timm
 import torch
-import pytest
-from torch_utils import TestTorchConvertModel, process_pytest_marks
 from models_hub_common.constants import hf_hub_cache_dir
 from models_hub_common.utils import cleanup_dir, get_models_list
 
+from torch_utils import TestTorchConvertModel, process_pytest_marks
+
 
 def filter_timm(timm_list: list) -> list:
 unique_models = set()
 filtered_list = []
 ignore_set = {"base", "mini", "small", "xxtiny", "xtiny", "tiny", "lite", "nano", "pico", "medium", "big",
-"large", "xlarge", "xxlarge", "huge", "gigantic", "giant", "enormous", "xs", "xxs", "s", "m", "l", "xl"}
+"large", "xlarge", "xxlarge", "huge", "gigantic", "giant", "enormous", "xs", "xxs", "s", "m", "l",
+"xl"}
 for name in sorted(timm_list):
 # first: remove datasets
 name_parts = name.split(".")
@@ -2,10 +2,12 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import os
+import tempfile
+
 import pytest
 import torch
-import tempfile
 import torchvision.transforms.functional as F
 
 from torch_utils import process_pytest_marks, TestTorchConvertModel
 
+
@@ -102,7 +104,8 @@ class TestTorchHubConvertModel(TestTorchConvertModel):
 def test_convert_model_precommit(self, model_name, ie_device):
 self.run(model_name, None, ie_device)
 
-@pytest.mark.parametrize("name", process_pytest_marks(os.path.join(os.path.dirname(__file__), "torchvision_models")))
+@pytest.mark.parametrize("name",
+process_pytest_marks(os.path.join(os.path.dirname(__file__), "torchvision_models")))
 @pytest.mark.nightly
 def test_convert_model_all_models(self, name, ie_device):
 self.run(name, None, ie_device)
@@ -3,8 +3,8 @@
 
 import pytest
 import torch
-from models_hub_common.utils import get_models_list
 from models_hub_common.test_convert_model import TestConvertModel
+from models_hub_common.utils import get_models_list
 from openvino import convert_model
 
 
@@ -26,7 +26,9 @@ def flattenize_structure(outputs):
 
 
 def process_pytest_marks(filepath: str):
-return [pytest.param(n, marks=pytest.mark.xfail(reason=r) if m == "xfail" else pytest.mark.skip(reason=r)) if m else n for n, _, m, r in get_models_list(filepath)]
+return [
+pytest.param(n, marks=pytest.mark.xfail(reason=r) if m == "xfail" else pytest.mark.skip(reason=r)) if m else n
+for n, _, m, r in get_models_list(filepath)]
 
 
 class TestTorchConvertModel(TestConvertModel):
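As a closing note on the comprehension re-wrapped in the last hunk: `process_pytest_marks` turns each `(n, _, m, r)` entry from `get_models_list` (a model name plus an optional mark and reason) into either a plain test id or a `pytest.param` carrying an `xfail`/`skip` mark. Below is a minimal self-contained illustration of the same idea; the in-memory `MODELS` list and the `make_params` name are made up and only stand in for the real file-based helper.

import pytest

# Hypothetical stand-in for get_models_list(filepath): (name, link, mark, reason) tuples.
MODELS = [
    ("resnet50", None, None, None),
    ("some_flaky_model", None, "xfail", "accuracy mismatch"),
    ("huge_model", None, "skip", "needs too much memory"),
]


def make_params(models):
    # Same shape as the reformatted comprehension: plain name, or a pytest.param
    # carrying an xfail/skip mark with its reason.
    return [
        pytest.param(n, marks=pytest.mark.xfail(reason=r) if m == "xfail" else pytest.mark.skip(reason=r)) if m else n
        for n, _, m, r in models]


@pytest.mark.parametrize("name", make_params(MODELS))
def test_name_is_not_empty(name):
    assert name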