[CONFORMANCE] Improve python test utils (#15329)

* Remove download of models + move some methods to utils

* Separate constants

* filelist

* separate conformance utilities

* Update script according to new utils

* Fix subgraphsDumper crash

* Some small improvements for API conformance

* add warn_message

* One short fix

* fix master
Irina Efode 2023-01-28 00:02:02 +04:00 committed by GitHub
parent 290947da84
commit e88210c95d
14 changed files with 311 additions and 393 deletions
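The refactor replaces the single utils module with several focused helpers. A minimal sketch of the resulting import pattern (module and helper names are taken from the hunks below; the logger name is illustrative):

    from utils.conformance_utils import get_logger, progressbar, set_env_variable
    from utils import constants, file_utils, stat_update_utils

    logger = get_logger('my_tool')
    logger.info(f"Binary extension on this platform: '{constants.OS_BIN_FILE_EXT}'")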


@@ -196,10 +196,9 @@ OPCache::serialize_function(const std::pair<std::shared_ptr<ov::Node>, LayerTest
}
std::replace(op_name.begin(), op_name.end(), '/', '_');
std::replace(op_name.begin(), op_name.end(), '\\', '_');
auto filePrefix = CommonTestUtils::generateTestFilePrefix();
auto xml_path = current_op_folder + CommonTestUtils::FileSeparator + filePrefix + op_name + ".xml";
auto bin_path = current_op_folder + CommonTestUtils::FileSeparator + filePrefix + op_name + ".bin";
auto meta_info = current_op_folder + CommonTestUtils::FileSeparator + filePrefix + op_name + ".meta";
auto xml_path = current_op_folder + CommonTestUtils::FileSeparator + op_name + ".xml";
auto bin_path = current_op_folder + CommonTestUtils::FileSeparator + op_name + ".bin";
auto meta_info = current_op_folder + CommonTestUtils::FileSeparator + op_name + ".meta";
auto cnn_net = InferenceEngine::CNNNetwork(function);
cnn_net.serialize(xml_path, bin_path);
serialize_meta_info(op.second, meta_info);
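The hunk above drops the random per-run file prefix, so each dumped IR triple (.xml/.bin/.meta) is named deterministically by the sanitized op name, presumably so repeated dumps overwrite instead of accumulating and so the later hash-based renaming sees stable inputs. A Python sketch of the resulting naming (names are illustrative):

    import os

    def ir_triple(folder: str, op_name: str):
        # mirror the C++ sanitization above: path separators become underscores
        safe = op_name.replace('/', '_').replace('\\', '_')
        return [os.path.join(folder, safe + ext) for ext in ('.xml', '.bin', '.meta')]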


@@ -8,7 +8,8 @@ import zipfile
import argparse
import openpyxl
from utils import utils
from utils.conformance_utils import get_logger
from utils import constants
LOGS_ZIP_NAME = 'logs.zip'
@@ -17,7 +18,7 @@ NEW_LOG_DIR = 'ie_logs'
SW_PLUGINS = {'HETERO': '1', 'AUTO': '2', 'BATCH': '3', 'MULTI': '4'}
logger = utils.get_logger('AnalyzerConformanceLog')
logger = get_logger('AnalyzerConformanceLog')
class AnalyzerConformanceLog:
@@ -90,7 +91,7 @@ class AnalyzerConformanceLog:
test_info_by_device = None
for line in lines:
if utils.RUN in line:
if constants.RUN in line:
in_run_stage = True
error_msg = ''
# if run stage exists, it is because gtest decided to show log as test fails
@@ -98,7 +99,7 @@ class AnalyzerConformanceLog:
continue
# it is result, we got to the end of run stage
if utils.TEST_STATUS['failed'] in line:
if constants.TEST_STATUS['failed'][0] in line:
in_run_stage = False
if error_msg:
test_info_by_device['err_info'] = error_msg
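The lookup changes because the shared constants now map each status to a list of gtest markers instead of a single string (see the new utils/constants.py below), so callers take element 0 as the canonical marker. A short sketch of the difference:

    # old: utils.TEST_STATUS['failed'] was a single string
    # new: constants.TEST_STATUS['failed'] is a list of markers
    failed_marker = constants.TEST_STATUS['failed'][0]  # "[ FAILED ]"
    if failed_marker in line:
        in_run_stage = False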


@@ -2,14 +2,14 @@ import os
import re
import argparse
from utils import utils
from utils.conformance_utils import get_logger
from pathlib import Path
import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
logger = utils.get_logger('HighlightTable')
logger = get_logger('HighlightTable')
OPSET_REPORT_NAME_RE = r'.*report_opset_\w*.xml'
API_REPORT_NAME_RE = r'.*report_api_\w*.xml'


@@ -8,7 +8,8 @@ import glob
import defusedxml.ElementTree as ET
from defusedxml import defuse_stdlib
from utils import utils
from utils.conformance_utils import get_logger
from utils import stat_update_utils
# defuse_stdlib provides a patched version of xml.etree.ElementTree which allows using objects from xml.etree.ElementTree
# in a safe manner without including the unsafe xml.etree.ElementTree
@@ -16,7 +17,7 @@ ET_defused = defuse_stdlib()[ET]
Element = ET_defused.Element
SubElement = ET_defused.SubElement
logger = utils.get_logger('XmlMerger')
logger = get_logger('xml_merge_tool')
def parse_arguments():
parser = argparse.ArgumentParser()
@@ -54,7 +55,7 @@ def update_result_node(xml_node: SubElement, aggregated_res: SubElement):
def aggregate_test_results(aggregated_results: SubElement, xml_reports: list, report_type: str):
aggregated_timestamp = None
for xml in xml_reports:
logger.info(f" Processing: {xml}")
# logger.info(f" Processing: {xml}")
try:
xml_root = ET.parse(xml).getroot()
except ET.ParseError:
@@ -131,10 +132,10 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str, output_filenam
SubElement(entity_list, entity.tag)
timestamp = aggregate_test_results(results, xml_reports, report_type)
if report_type == "OP":
utils.update_passrates(results)
stat_update_utils.update_passrates(results)
else:
for sub_result in results:
utils.update_passrates(sub_result)
stat_update_utils.update_passrates(sub_result)
summary.set("timestamp", timestamp)
logger.info(f" Processing is finished")


@@ -6,16 +6,39 @@ import defusedxml.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path
from hashlib import sha256
from utils import utils
from utils.conformance_utils import get_logger, set_env_variable
from utils.constants import PY_OPENVINO, LD_LIB_PATH_NAME, PYTHON_NAME
from utils.file_utils import get_ov_path, find_latest_dir
from openvino.runtime import Core
import os
logger = get_logger('rename_conformance_ir')
try:
from openvino.runtime import Core
except:
script_dir, _ = os.path.split(os.path.abspath(__file__))
ov_bin_path = get_ov_path(script_dir, None, True)
if PY_OPENVINO in os.listdir(ov_bin_path):
env = os.environ
py_ov = os.path.join(ov_bin_path, PY_OPENVINO)
py_ov = os.path.join(py_ov, find_latest_dir(py_ov))
env = set_env_variable(env, "PYTHONPATH", py_ov)
env = set_env_variable(env, LD_LIB_PATH_NAME, ov_bin_path)
logger.warning("Set the following env varibles to rename conformance ir based on hash: ")
logger.warning(f'PYTHONPATH={env["PYTHONPATH"]}')
logger.warning(f'{LD_LIB_PATH_NAME}={env[LD_LIB_PATH_NAME]}')
exit(0)
else:
print(f'Impossible to run the tool! PyOpenVINO was not built!')
exit(-1)
XML_EXTENSION = ".xml"
BIN_EXTENSION = ".bin"
META_EXTENSION = ".meta"
logger = utils.get_logger('Rename Conformance IRs using hash')
def parse_arguments():
parser = ArgumentParser()
@@ -77,7 +100,7 @@ def create_hash(in_dir_path: Path):
meta_path.rename(Path(meta_path.parent, new_name + META_EXTENSION))
bin_path.rename(Path(bin_path.parent, new_name + BIN_EXTENSION))
logger.info(f"{old_name} -> {new_name}")
# logger.info(f"{old_name} -> {new_name}")
if __name__=="__main__":
args = parse_arguments()
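create_hash renames each xml/bin/meta triple to a stable, hash-derived name so that repeated runs produce identical file names. A minimal sketch of the idea, assuming the digest is taken over the IR content (the exact hashed payload is defined in the body of create_hash, which this hunk does not show):

    from hashlib import sha256
    from pathlib import Path

    def rename_by_hash(xml_path: Path):
        digest = sha256(xml_path.read_bytes()).hexdigest()
        for ext in ('.xml', '.bin', '.meta'):
            src = xml_path.with_suffix(ext)
            if src.exists():
                src.rename(src.with_name(digest + ext))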


@@ -1,4 +1,3 @@
jinja2==3.1.2
gitpython
defusedxml>=0.7.1
openpyxl==3.0.10


@@ -1,79 +1,39 @@
from asyncio import subprocess
from queue import Empty
from git import Repo
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from argparse import ArgumentParser
from utils import utils
from subprocess import Popen
from shutil import copytree, rmtree
from summarize import create_summary
from merge_xmls import merge_xml
from run_parallel import TestParallelRunner
from pathlib import Path, PurePath
from sys import version, platform
from pathlib import Path
import defusedxml.ElementTree as ET
from urllib.parse import urlparse
import os
import urllib.request as ur
from utils import constants
from utils.conformance_utils import get_logger
from utils import file_utils
logger = utils.get_logger('ConformanceRunner')
OPENVINO_NAME = 'openvino'
OMZ_REPO_URL = "https://github.com/openvinotoolkit/open_model_zoo.git"
OMZ_REPO_BRANCH = "master"
GTEST_PARALLEL_URL = "https://github.com/intel-innersource/frameworks.ai.openvino.ci.infrastructure.git"
GTEST_PARALLEL_BRANCH = "master"
logger = get_logger('conformance_runner')
is_hash = True
try:
from rename_conformance_ir import create_hash
except:
logger.warning("Please set the above env variable to get the same conformance ir names run by run!")
is_hash = False
API_CONFORMANCE_BIN_NAME = "apiConformanceTests"
OP_CONFORMANCE_BIN_NAME = "conformanceTests"
SUBGRAPH_DUMPER_BIN_NAME = "subgraphsDumper"
DEBUG_DIR = "Debug"
RELEASE_DIR = "Release"
IS_WIN = "windows" in platform or "win32" in platform
OS_SCRIPT_EXT = ".bat" if IS_WIN else ""
OS_BIN_FILE_EXT = ".exe" if IS_WIN else ""
NO_MODEL_CONSTANT = "NO_MODEL"
ENV_SEPARATOR = ";" if IS_WIN else ":"
PYTHON_NAME = "python" if IS_WIN else "python3"
PIP_NAME = "pip" if IS_WIN else "pip3"
NO_MODEL_CONSTANT = "http://ov-share-03.sclab.intel.com/Shares/conformance_ir/dlb/master/2022.3.0-8953-8c3425ff698.tar"
SCRIPT_DIR_PATH, SCRIPT_NAME = os.path.split(os.path.abspath(__file__))
def find_latest_dir(in_dir: Path, pattern_list = list()):
get_latest_dir = lambda path: sorted(Path(path).iterdir(), key=os.path.getmtime)
entities = get_latest_dir(in_dir)
entities.reverse()
for entity in entities:
if entity.is_dir():
if not pattern_list:
return entity
else:
for pattern in pattern_list:
if pattern in str(os.fspath(PurePath(entity))):
return entity
logger.error(f"{in_dir} does not contain applicable directories to patterns: {pattern_list}")
exit(-1)
def get_ov_path(ov_dir=None, is_bin=False):
if ov_dir is None or not os.path.isdir(ov_dir):
if 'INTEL_OPENVINO_DIR' in os.environ:
ov_dir = os.environ['INTEL_OPENVINO_DIR']
else:
ov_dir = os.path.abspath(SCRIPT_DIR_PATH)[:os.path.abspath(SCRIPT_DIR_PATH).find(OPENVINO_NAME) + len(OPENVINO_NAME)]
if is_bin:
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, ['bin']))
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir))
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, [DEBUG_DIR, RELEASE_DIR]))
return ov_dir
def get_default_working_dir():
path = Path(__file__).parent.resolve()
return os.path.join(path, "temp")
@@ -81,9 +41,9 @@ def get_default_working_dir():
def parse_arguments():
parser = ArgumentParser()
models_path_help = "Path to the directory/ies containing models to dump subgraph (the default way is to download OMZ). If --s=0, specify the Conformance IRs directory"
models_path_help = "Path to the directory/ies containing models to dump subgraph (the default way is to download conformance IR). It may be directory, archieve file, .lst file or http link to download something . If --s=0, specify the Conformance IRs directory"
device_help = " Specify the target device. The default value is CPU"
ov_help = "OV binary files path. The default way is try to find installed OV by INTEL_OPENVINO_DIR in environmet variables or to find the absolute path of OV repo (by using script path)"
ov_help = "OV repo path. The default way is try to find the absolute path of OV repo (by using script path)"
working_dir_help = "Specify a working directory to save all artifacts, such as reports, models, conformance_irs, etc."
type_help = "Specify conformance type: `OP` or `API`. The default value is `OP`"
workers_help = "Specify number of workers to run in parallel. The default value is CPU count - 1"
@@ -93,123 +53,56 @@ def parse_arguments():
parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False, default=NO_MODEL_CONSTANT)
parser.add_argument("-d", "--device", help= device_help, type=str, required=False, default="CPU")
parser.add_argument("-ov", "--ov_path", help=ov_help, type=str, required=False, default=get_ov_path())
parser.add_argument("-ov", "--ov_path", help=ov_help, type=str, required=False, default=file_utils.get_ov_path(SCRIPT_DIR_PATH))
parser.add_argument("-w", "--working_dir", help=working_dir_help, type=str, required=False, default=get_default_working_dir())
parser.add_argument("-t", "--type", help=type_help, type=str, required=False, default="OP")
parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count()-1)
parser.add_argument("--gtest_filter", help=gtest_filter_helper, type=str, required=False, default="*")
parser.add_argument("-c", "--ov_config_path", help=ov_config_path_helper, type=str, required=False, default="")
parser.add_argument("-s", "--dump_conformance", help=dump_conformance_help, type=int, required=False, default=1)
parser.add_argument("-s", "--dump_conformance", help=dump_conformance_help, type=int, required=False, default=0)
return parser.parse_args()
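With the new defaults (--dump_conformance=0 and a download URL as the models path), a plain run fetches the prepared conformance IR archive instead of dumping models locally; a local IR directory can still be passed explicitly. Example invocations (the script name is assumed):

    python3 run_conformance.py -d CPU -t OP
    python3 run_conformance.py -d CPU -t OP -m /path/to/conformance_irs -j 8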
def set_env_variable(env: os.environ, var_name: str, var_value: str):
if var_name in env:
env[var_name] = var_value + ENV_SEPARATOR + env[var_name]
else:
env[var_name] = var_value
return env
class Conformance:
def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, workers:int,
gtest_filter:str, working_dir:os.path, ov_config_path:os.path):
self._device = device
self._model_path = model_path
self._ov_path = ov_path
self._ov_bin_path = get_ov_path(self._ov_path, True)
self._ov_bin_path = file_utils.get_ov_path(SCRIPT_DIR_PATH, self._ov_path, True)
self._working_dir = working_dir
if not os.path.exists(self._working_dir):
os.mkdir(self._working_dir)
if os.path.exists(self._working_dir):
logger.info(f"Working dir {self._working_dir} is cleaned up")
rmtree(self._working_dir)
os.mkdir(self._working_dir)
if not (type == "OP" or type == "API"):
logger.error(f"Incorrect conformance type: {type}. Please use 'OP' or 'API'")
exit(-1)
self._type = type
self._workers = workers
self._gtest_filter = gtest_filter
if not os.path.exists(ov_config_path) and ov_config_path != "":
logger.error(f"Specified config file does not exist: {ov_config_path}.")
exit(-1)
self._ov_config_path = ov_config_path
def __download_repo(self, https_url: str, version: str):
repo_name = https_url[https_url.rfind('/') + 1:len(https_url) - 4]
repo_path = os.path.join(self._working_dir, repo_name)
if os.path.isdir(repo_path):
logger.info(f'Repo: {repo_name} exists in {self._working_dir}. Skip the repo download.')
repo = Repo(repo_path)
else:
logger.info(f'Started to clone repo: {https_url} to {repo_path}')
repo = Repo.clone_from(https_url, repo_path)
repo.submodule_update(recursive=True)
logger.info(f'Repo {https_url} was cloned sucessful')
remote_version = "origin/" + version
if remote_version in repo.git.branch('-r').replace(' ', '').split('\n'):
repo.git.checkout(version)
repo.git.pull()
repo.submodule_update(recursive=True)
logger.info(f'Repo {https_url} is on {version}')
return repo_path
def _convert_models(self):
omz_tools_path = os.path.join(self._omz_path, "tools", "model_tools")
original_model_path = os.path.join(self._working_dir, "original_models")
converted_model_path = os.path.join(self._working_dir, "converted_models")
if os.path.isdir(original_model_path):
logger.info(f"Original model path: {original_model_path} is removed")
rmtree(original_model_path)
if os.path.isdir(converted_model_path):
logger.info(f"Converted model path: {converted_model_path} is removed")
rmtree(converted_model_path)
mo_path = os.path.join(self._ov_path, "tools", "mo")
ov_python_path = os.path.join(self._ov_bin_path, "python_api", f"python{version[0:3]}")
convert_model_env = os.environ.copy()
ld_lib_path_name = ""
# Windows or MacOS
if IS_WIN or platform == "darwin":
ld_lib_path_name = "PATH"
# Linux
elif "lin" in platform:
ld_lib_path_name = "LD_LIBRARY_PATH"
convert_model_env = set_env_variable(convert_model_env, ld_lib_path_name, self._ov_bin_path)
convert_model_env = set_env_variable(convert_model_env, "PYTHONPATH", f"{ov_python_path}{ENV_SEPARATOR}{mo_path}")
convert_model_env = set_env_variable(convert_model_env, "OMZ_ROOT", self._omz_path)
logger.info(f"Model conversion from {original_model_path} to {converted_model_path} is started")
activate_path = os.path.join(".env3", "bin", "activate")
command = f'cd "{self._working_dir}"; ' \
f'{"" if os.path.isdir(".env3") else f"{PYTHON_NAME} -m venv .env3; "} '\
f'{"" if IS_WIN else "source"} {activate_path}{OS_SCRIPT_EXT}; '\
f'{PIP_NAME} install -e "{mo_path}/.[caffe,kaldi,mxnet,onnx,pytorch,tensorflow2]"; ' \
f'{PIP_NAME} install "{omz_tools_path}/.[paddle,pytorch,tensorflow]"; ' \
f'omz_downloader --all --output_dir="{original_model_path}"; '\
f'omz_converter --all --download_dir="{original_model_path}" --output_dir="{converted_model_path}"; '\
f'deactivate'
def __download_conformance_ir(self):
_, file_name = os.path.split(urlparse(self._model_path).path)
model_archieve_path = os.path.join(self._working_dir, file_name)
try:
process = Popen(command, shell=True, env=convert_model_env)
out, err = process.communicate()
if err is None:
for line in str(out).split('\n'):
logger.info(line)
else:
logger.error(err)
exit(-1)
logger.info(f"Model conversion is successful. Converted models are saved to {converted_model_path}")
logger.info(f"Conformance IRs will be downloaded from {self._model_path} to {model_archieve_path}")
ur.urlretrieve(self._model_path, filename=model_archieve_path)
except:
logger.error(f"Something is wrong with the model conversion! Abort the process")
logger.error(f"Please verify URL: {self._model_path}. Looks like that is incorrect")
exit(-1)
return converted_model_path
logger.info(f"Conformance IRs were downloaded from {self._model_path} to {model_archieve_path}")
if not file_utils.is_archieve(model_archieve_path):
logger.error(f"The file {model_archieve_path} is not archieve! It should be the archieve!")
exit()
self._model_path = file_utils.unzip_archieve(model_archieve_path, self._working_dir)
def download_and_convert_models(self):
logger.info("Starting model downloading and conversion")
self._omz_path = self.__download_repo(OMZ_REPO_URL, OMZ_REPO_BRANCH)
self._model_path = self._convert_models()
logger.info("Model downloading and conversion is finished successful")
def dump_subgraph(self):
subgraph_dumper_path = os.path.join(self._ov_bin_path, f'{SUBGRAPH_DUMPER_BIN_NAME}{OS_BIN_FILE_EXT}')
def __dump_subgraph(self):
subgraph_dumper_path = os.path.join(self._ov_bin_path, f'{SUBGRAPH_DUMPER_BIN_NAME}{constants.OS_BIN_FILE_EXT}')
if not os.path.isfile(subgraph_dumper_path):
logger.error(f"{subgraph_dumper_path} is not exist!")
exit(-1)
@@ -231,13 +124,18 @@ class Conformance:
logger.error("Process failed on step: 'Subgraph dumping'")
exit(-1)
self._model_path = conformance_ir_path
def run_conformance(self):
conformance_path = None
if self._type == "OP":
conformance_path = os.path.join(self._ov_bin_path, f'{OP_CONFORMANCE_BIN_NAME}{OS_BIN_FILE_EXT}')
if is_hash:
create_hash(Path(self._model_path))
logger.info(f"All conformance IRs in {self._ov_bin_path} were renamed based on hash")
else:
conformance_path = os.path.join(self._ov_bin_path, f'{API_CONFORMANCE_BIN_NAME}{OS_BIN_FILE_EXT}')
logger.warning("The OV Python was not built or Environment was not updated to requirments. Skip the step to rename Conformance IR based on a hash")
def __run_conformance(self):
conformance_path = None
if self._type == constants.OP_CONFORMANCE:
conformance_path = os.path.join(self._ov_bin_path, f'{OP_CONFORMANCE_BIN_NAME}{constants.OS_BIN_FILE_EXT}')
else:
conformance_path = os.path.join(self._ov_bin_path, f'{API_CONFORMANCE_BIN_NAME}{constants.OS_BIN_FILE_EXT}')
if not os.path.isfile(conformance_path):
logger.error(f"{conformance_path} is not exist!")
@@ -254,29 +152,30 @@ class Conformance:
if not os.path.isdir(logs_dir):
os.mkdir(logs_dir)
try:
command_line_args = [f"--device={self._device}", f'--input_folders="{self._model_path}"',
f"--report_unique_name", f'--output_folder="{parallel_report_dir}"',
f'--gtest_filter={self._gtest_filter}', f'--config_path "{self._ov_config_path}"']
conformance = TestParallelRunner(f"{conformance_path}", command_line_args, self._workers, logs_dir, "")
conformance.run()
conformance.postprocess_logs()
except:
logger.error(f"Please check the output from `parallel_runner`. Something is wrong")
exit(-1)
command_line_args = [f"--device={self._device}", f'--input_folders="{self._model_path}"',
f"--report_unique_name", f'--output_folder="{parallel_report_dir}"',
f'--gtest_filter={self._gtest_filter}', f'--config_path="{self._ov_config_path}"']
conformance = TestParallelRunner(f"{conformance_path}", command_line_args, self._workers, logs_dir, "")
conformance.run()
conformance.postprocess_logs()
final_report_name = f'report_{self._type}'
# API Conformance contains both report types
merge_xml([parallel_report_dir], report_dir, final_report_name, self._type)
if self._type == constants.API_CONFORMANCE:
final_op_report_name = f'report_{constants.OP_CONFORMANCE}'
merge_xml([parallel_report_dir], report_dir, final_op_report_name, constants.OP_CONFORMANCE)
logger.info(f"Conformance is successful. XML reportwas saved to {report_dir}")
return (os.path.join(report_dir, final_report_name + ".xml"), report_dir)
def summarize(self, xml_report_path:os.path, report_dir: os.path):
def __summarize(self, xml_report_path:os.path, report_dir: os.path):
summary_root = ET.parse(xml_report_path).getroot()
create_summary(summary_root, report_dir, [], "", "", False, True)
copytree(os.path.join(SCRIPT_DIR_PATH, "template"), os.path.join(report_dir, "template"))
logger.info(f"Report was saved to {os.path.join(report_dir, 'report.html')}")
def start_pipeline(self, dump_models: bool):
command = f'{PIP_NAME} install -r {os.path.join(SCRIPT_DIR_PATH, "requirements.txt")}'
def run(self, dump_models: bool):
command = f'{constants.PIP_NAME} install -r {os.path.join(SCRIPT_DIR_PATH, "requirements.txt")}'
process = Popen(command, shell=True)
out, err = process.communicate()
if err is None:
@@ -296,16 +195,19 @@ class Conformance:
logger.info(f"[ARGUMENTS] --ov_config_path = {self._ov_config_path}")
logger.info(f"[ARGUMENTS] --dump_conformance = {dump_models}")
if self._model_path == NO_MODEL_CONSTANT or file_utils.is_url(self._model_path):
self.__download_conformance_ir()
if dump_models:
if self._model_path == NO_MODEL_CONSTANT:
self.download_and_convert_models()
self.dump_subgraph()
self.__dump_subgraph()
if not os.path.exists(self._model_path):
logger.error(f"The model direstory {self._model_path} does not exist!")
exit(-1)
if not os.path.exists(self._model_path):
logger.error(f"Directory {self._model_path} does not exist")
exit(-1)
xml_report, report_dir = self.run_conformance()
xml_report, report_dir = self.__run_conformance()
if self._type == "OP":
self.summarize(xml_report, report_dir)
self.__summarize(xml_report, report_dir)
if __name__ == "__main__":
args = parse_arguments()
@@ -313,4 +215,4 @@ if __name__ == "__main__":
args.ov_path, args.type,
args.workers, args.gtest_filter,
args.working_dir, args.ov_config_path)
conformance.start_pipeline(args.dump_conformance)
conformance.run(args.dump_conformance)


@@ -1,13 +1,14 @@
# Copyright (C) 2018-2023 Intel Corporation
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from utils import utils
from utils.conformance_utils import get_logger, progressbar
from utils import constants
from utils import file_utils
from argparse import ArgumentParser
from subprocess import Popen, STDOUT, TimeoutExpired
from hashlib import sha256
from pathlib import Path
from shutil import rmtree, copyfile
from zipfile import ZipFile, is_zipfile
from shutil import rmtree
import os
import sys
@@ -15,7 +16,6 @@ import threading
import platform
import csv
import datetime
import tarfile
if sys.version_info.major >= 3:
import _thread as thread
@@ -26,18 +26,8 @@ FILENAME_LENGTH = 255
LOG_NAME_REPLACE_STR = "##NAME##"
DEFAULT_PROCESS_TIMEOUT = 3600
MAX_LENGHT = 4096 if platform.system() != "Windows" else 8191
TEST_STATUS = {
'passed': ["[ OK ]"],
'failed': ["[ FAILED ]"],
'hanged': ["Test finished by timeout"],
'crashed': ["Unexpected application crash with code", "Segmentation fault", "Crash happens", "core dumped"],
'skipped': ["[ SKIPPED ]"],
'interapted': ["interapted", "Killed"]}
RUN = "[ RUN ]"
GTEST_FILTER = "Google Test filter = "
DISABLED_PREFIX = "DISABLED_"
logger = utils.get_logger('test_parallel_runner')
logger = get_logger('test_parallel_runner')
def parse_arguments():
parser = ArgumentParser()
@@ -160,32 +150,12 @@ class TestParallelRunner:
self._cache_path = os.path.join(cache_path)
head, _ = os.path.split(self._cache_path)
if not os.path.exists(head):
pass
os.mkdir(head)
self._is_save_cache = True
self._disabled_tests = list()
self._total_test_cnt = 0
def __unzip_archieve(self, zip_path: os.path):
_, tail = os.path.split(zip_path)
dst_path = os.path.join(self._working_dir, tail)
copyfile(zip_path, dst_path)
logger.info(f"Archieve {zip_path} was copied to {dst_path}")
dst_dir, _ = os.path.splitext(dst_path)
if tarfile.is_tarfile(zip_path):
file = tarfile.open(dst_path)
file.extractall(dst_dir)
file.close()
elif is_zipfile(zip_path):
with ZipFile(dst_path, 'r') as zObject:
zObject.extractall(path=dst_dir)
else:
logger.error(f"Impossible to extract {zip_path}")
sys.exit(-1)
logger.info(f"Archieve {dst_path} was extacted to {dst_dir}")
os.remove(dst_path)
logger.info(f"Archieve {dst_path} was removed")
return dst_dir
def __init_basic_command_line_for_exec_file(self, test_command_line: list):
command = f'{self._exec_file_path}'
@@ -199,9 +169,9 @@
buf = ""
for _ in argument.split(','):
input_path = argument.replace('"', '')
if os.path.isfile(input_path) and (tarfile.is_tarfile(input_path) or is_zipfile(input_path)):
input_path = self.__unzip_archieve(input_path)
buf = utils.prepare_filelist(input_path, "*.xml", logger)
if os.path.isfile(input_path) and file_utils.is_archieve(input_path):
input_path = file_utils.unzip_archieve(input_path, self._working_dir)
buf = file_utils.prepare_filelist(input_path, "*.xml")
buf += ","
argument = buf
else:
@@ -230,7 +200,7 @@
pos = test_name.find('#')
if pos > 0:
real_test_name = test_suite + test_name[2:pos-2]
if DISABLED_PREFIX in real_test_name:
if constants.DISABLED_PREFIX in real_test_name:
self._disabled_tests.append(real_test_name)
else:
test_list.append(f'"{self.__replace_restricted_symbols(real_test_name)}":')
@@ -251,7 +221,7 @@
pos = line.find(":")
time = line[:pos]
test_name = line[pos+1:]
if not DISABLED_PREFIX in test_name:
if not constants.DISABLED_PREFIX in test_name:
test_list_cache.append(TestStructure(test_name.replace("\n", ""), time))
logger.info(f"Len test_list_cache: {len(test_list_cache)}")
return test_list_cache
@@ -284,7 +254,7 @@
# Run crashed tests in a separed thread
if idx < len(proved_test_list):
while proved_test_list[idx]._time == -1:
while proved_test_list[idx]._time == -1 :
proved_test_list.pop(idx)
if idx >= len(proved_test_list):
break
@@ -352,21 +322,6 @@
logger.info(f"Total test counter is {self._total_test_cnt}")
return final_test_list
@staticmethod
def progressbar(it_num, message="", progress_bar_size=60, out=sys.stdout):
max_len = len(it_num)
if max_len == 0:
return
def show(sym_pos):
x = int(progress_bar_size * sym_pos / max_len)
print("{}[{}{}] {}/{}".format(message, "#"*x, "."*(progress_bar_size-x), sym_pos, max_len),
end='\r', file=out, flush=True)
show(0)
for i, item in enumerate(it_num):
yield item
show(i+1)
print("", flush=True, file=out)
def run(self):
if TaskManager.process_timeout == -1:
TaskManager.process_timeout = DEFAULT_PROCESS_TIMEOUT
@@ -375,11 +330,9 @@
commands = [f'{self._command} --gtest_filter={filter}' for filter in self.__get_filters()]
task_manager = TaskManager(commands, self._working_dir)
# from tqdm import tqdm
# for _ in tqdm(range(self._worker_num)):
for _ in self.progressbar(range(self._worker_num), "Worker initialization: ", 40):
for _ in progressbar(range(self._worker_num), "Worker initialization: ", 40):
task_manager.init_worker()
for _ in self.progressbar(range(len(commands) - self._worker_num), "Worker execution: ", 40):
for _ in progressbar(range(len(commands) - self._worker_num), "Worker execution: ", 40):
if not task_manager.update_worker():
break
task_manager.compelete_all_processes()
@@ -413,7 +366,7 @@
logger.info(f"Logs directory {logs_dir} is cleaned up")
rmtree(logs_dir)
os.mkdir(logs_dir)
for test_st, _ in TEST_STATUS.items():
for test_st, _ in constants.TEST_STATUS.items():
if not os.path.exists(os.path.join(logs_dir, test_st)):
os.mkdir(os.path.join(logs_dir, test_st))
hash_map = dict()
@@ -426,13 +379,13 @@
dir = None
test_cnt_expected = test_cnt_real_saved_now = test_cnt_real_saved_before = 0
for line in log_file.readlines():
if GTEST_FILTER in line:
line = line[line.find(GTEST_FILTER):]
if constants.GTEST_FILTER in line:
line = line[line.find(constants.GTEST_FILTER):]
test_cnt_expected = line.count(':')
if RUN in line:
test_name = line[line.find(RUN) + len(RUN) + 1:-1:]
if constants.RUN in line:
test_name = line[line.find(constants.RUN) + len(constants.RUN) + 1:-1:]
if dir is None:
for test_st, mes_list in TEST_STATUS.items():
for test_st, mes_list in constants.TEST_STATUS.items():
for mes in mes_list:
if mes in line:
dir = test_st
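The loop above routes each test's log into a per-status directory: the first status marker found in a line decides the bucket. A condensed sketch of that rule:

    def classify_line(line: str):
        # returns e.g. 'passed', 'failed' or 'crashed'; None for ordinary output
        for status, markers in constants.TEST_STATUS.items():
            if any(marker in line for marker in markers):
                return status
        return None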


@@ -9,7 +9,8 @@ from defusedxml import defuse_stdlib
from jinja2 import Environment, FileSystemLoader
from utils import utils
from utils.conformance_utils import get_logger
from utils import stat_update_utils
# defuse_stdlib provides a patched version of xml.etree.ElementTree which allows using objects from xml.etree.ElementTree
# in a safe manner without including the unsafe xml.etree.ElementTree
@@ -22,7 +23,7 @@ NA = "N/A"
STATUS_CSV_ORDER = ["implemented", "passed", "failed", "skipped", "crashed", "hanged", "passrate"]
logger = utils.get_logger('Summarize')
logger = get_logger('conformance_summary')
def parse_arguments():
@@ -104,7 +105,7 @@ def merge_xmls(xml_paths: list):
device_results.find(current_op_res.tag).set(attr_name, str(xml_value))
else:
device_results.append(op_result)
utils.update_passrates(summary_results)
stat_update_utils.update_passrates(summary_results)
summary.set("timestamp", timestamp)
logger.info("Merging XML files is competed")
return summary
@@ -228,8 +229,8 @@ def serialize_to_csv(report_filename: str, output_dir: os.path, op_list: list, d
def create_summary(summary_root: Element, output_folder: os.path, expected_devices:list, report_tag: str, report_version: str,
is_conformance_mode: bool, is_serialize_to_csv: bool, output_filename='report'):
if is_conformance_mode:
utils.update_conformance_test_counters(summary_root, logger)
utils.update_passrates(summary_root.find("results"))
stat_update_utils.update_conformance_test_counters(summary_root)
stat_update_utils.update_passrates(summary_root.find("results"))
device_list, results, general_pass_rate, pass_rate_avg, general_test_count, trusted_ops, covered_ops = \
collect_statistic(summary_root, is_conformance_mode)


@@ -0,0 +1,40 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging
from sys import stdout
from os import environ
from . import constants
def get_logger(app_name: str):
logging.basicConfig()
logger = logging.getLogger(app_name)
logger.setLevel(logging.INFO)
return logger
UTILS_LOGGER = get_logger('conformance_utilities')
def progressbar(it_num, message="", progress_bar_size=60, out=stdout):
max_len = len(it_num)
if max_len == 0:
return
def show(sym_pos):
x = int(progress_bar_size * sym_pos / max_len)
print("{}[{}{}] {}/{}".format(message, "#"*x, "."*(progress_bar_size-x), sym_pos, max_len),
end='\r', file=out, flush=True)
show(0)
for i, item in enumerate(it_num):
yield item
show(i+1)
print("", flush=True, file=out)
def set_env_variable(env: environ, var_name: str, var_value: str):
if var_name in env and not var_value in env[var_name]:
env[var_name] = var_value + constants.ENV_SEPARATOR + env[var_name]
else:
env[var_name] = var_value
return env
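A short usage sketch for the helpers above (the PYTHONPATH value is illustrative):

    logger = get_logger('example_tool')
    for item in progressbar(range(4), "Workers: ", 40):
        pass  # one unit of work per yielded item
    env = set_env_variable(dict(environ), "PYTHONPATH", "/opt/ov/python")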


@@ -0,0 +1,33 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from sys import platform
TEST_STATUS = {
'passed': ["[ OK ]"],
'failed': ["[ FAILED ]"],
'hanged': ["Test finished by timeout"],
'crashed': ["Unexpected application crash with code", "Segmentation fault", "Crash happens", "core dumped"],
'skipped': ["[ SKIPPED ]"],
'interapted': ["interapted", "Killed"]}
RUN = "[ RUN ]"
GTEST_FILTER = "Google Test filter = "
DISABLED_PREFIX = "DISABLED_"
IS_WIN = "windows" in platform or "win32" in platform
OS_SCRIPT_EXT = ".bat" if IS_WIN else ""
OS_BIN_FILE_EXT = ".exe" if IS_WIN else ""
ENV_SEPARATOR = ";" if IS_WIN else ":"
PYTHON_NAME = "python" if IS_WIN else "python3"
PIP_NAME = "pip" if IS_WIN else "pip3"
LD_LIB_PATH_NAME = "PATH" if IS_WIN or platform == "darwin" else "LD_LIBRARY_PATH"
OPENVINO_NAME = 'openvino'
PY_OPENVINO = "python_api"
DEBUG_DIR = "Debug"
RELEASE_DIR = "Release"
OP_CONFORMANCE = "OP"
API_CONFORMANCE = "API"


@@ -0,0 +1,91 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import tarfile
from pathlib import Path, PurePath
from shutil import copyfile
from zipfile import ZipFile, is_zipfile
from urllib.parse import urlparse
from . import constants
from . import conformance_utils
# generates a filelist inside the directory; returns the path to the saved filelist
def prepare_filelist(input_dir: os.path, pattern: str):
filelist_path = input_dir
if os.path.isdir(filelist_path):
filelist_path = os.path.join(input_dir, "conformance_ir_files.lst")
elif os.path.isfile(filelist_path):
head, _ = os.path.split(filelist_path)
input_dir = head
if os.path.isfile(filelist_path):
conformance_utils.UTILS_LOGGER.info(f"{filelist_path} is exists! The script will update it!")
xmls = Path(input_dir).rglob(pattern)
try:
with open(filelist_path, 'w') as file:
for xml in xmls:
file.write(str(xml) + '\n')
file.close()
except:
conformance_utils.UTILS_LOGGER.warning(f"Impossible to update {filelist_path}! Something going is wrong!")
return filelist_path
def is_archieve(input_path: os.path):
return tarfile.is_tarfile(input_path) or is_zipfile(input_path)
def is_url(url: str):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except:
return False
def unzip_archieve(zip_path: os.path, dst_path: os.path):
_, tail = os.path.split(zip_path)
dst_path = os.path.join(dst_path, tail)
if zip_path != dst_path:
copyfile(zip_path, dst_path)
conformance_utils.UTILS_LOGGER.info(f"Archieve {zip_path} was copied to {dst_path}")
dst_dir, _ = os.path.splitext(dst_path)
if tarfile.is_tarfile(zip_path):
file = tarfile.open(dst_path)
file.extractall(dst_dir)
file.close()
elif is_zipfile(zip_path):
with ZipFile(dst_path, 'r') as zObject:
zObject.extractall(path=dst_dir)
else:
conformance_utils.UTILS_LOGGER.error(f"Impossible to extract {zip_path}")
exit(-1)
conformance_utils.UTILS_LOGGER.info(f"Archieve {dst_path} was extacted to {dst_dir}")
os.remove(dst_path)
conformance_utils.UTILS_LOGGER.info(f"Archieve {dst_path} was removed")
return dst_dir
# find the most recently modified directory
def find_latest_dir(in_dir: Path, pattern_list = list()):
get_latest_dir = lambda path: sorted(Path(path).iterdir(), key=os.path.getmtime)
entities = get_latest_dir(in_dir)
entities.reverse()
for entity in entities:
if entity.is_dir():
if not pattern_list:
return entity
else:
for pattern in pattern_list:
if pattern in str(os.fspath(PurePath(entity))):
return entity
conformance_utils.UTILS_LOGGER.error(f"{in_dir} does not contain applicable directories to patterns: {pattern_list}")
exit(-1)
def get_ov_path(script_dir_path: os.path, ov_dir=None, is_bin=False):
if ov_dir is None or not os.path.isdir(ov_dir):
ov_dir = os.path.abspath(script_dir_path)[:os.path.abspath(script_dir_path).find(constants.OPENVINO_NAME) + len(constants.OPENVINO_NAME)]
if is_bin:
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, ['bin']))
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir))
ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, [constants.DEBUG_DIR, constants.RELEASE_DIR]))
return ov_dir
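A hedged usage sketch for the helpers above (paths are illustrative): resolve the most recently built bin/<target>/<Debug|Release> directory, then build an IR filelist for the parallel runner.

    import os

    bin_dir = get_ov_path(os.path.dirname(os.path.abspath(__file__)), None, True)
    filelist = prepare_filelist("/path/to/conformance_irs", "*.xml")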


@@ -1,20 +1,9 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import xml.etree.ElementTree as ET
from pathlib import Path
TEST_STATUS = {'passed': "[ OK ]", 'failed': "[ FAILED ]", 'hanged': "Test finished by timeout", 'crashed': "Crash happens", 'skipped': "[ SKIPPED ]", "interapted": "interapted"}
RUN = "[ RUN ]"
def get_logger(app_name: str):
logging.basicConfig()
logger = logging.getLogger(app_name)
logger.setLevel(logging.INFO)
return logger
from . import conformance_utils
def update_passrates(results: ET.SubElement):
for device in results:
@@ -36,7 +25,7 @@ def update_passrates(results: ET.SubElement):
op.set("passrate", str(round(passrate, 1)))
def update_conformance_test_counters(results: ET.SubElement, logger: logging.Logger):
def update_conformance_test_counters(results: ET.SubElement):
max_test_cnt = dict()
incorrect_ops = set()
for device in results.find("results"):
@@ -63,24 +52,6 @@ def update_conformance_test_counters(results: ET.SubElement, logger: logging.Log
if test_cnt != max_test_cnt[op.tag]:
diff = max_test_cnt[op.tag] - test_cnt
op.set("skipped", str(int(op.attrib["skipped"]) + diff))
logger.warning(f'{device.tag}: added {diff} skipped tests for {op.tag}')
conformance_utils.UTILS_LOGGER.warning(f'{device.tag}: added {diff} skipped tests for {op.tag}')
update_passrates(results)
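A worked example of the balancing above: if ReLU has 10 tests on the fullest device and another device ran only 7, that device's ReLU node gets 3 added to its "skipped" counter before passrates are recomputed, so every device reports the same per-op test total.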
def prepare_filelist(input_dir: os.path, pattern: str, logger):
filelist_path = input_dir
if os.path.isdir(filelist_path):
filelist_path = os.path.join(input_dir, "conformance_ir_files.lst")
elif os.path.isfile(filelist_path):
head, _ = os.path.split(filelist_path)
input_dir = head
if os.path.isfile(filelist_path):
logger.info(f"{filelist_path} is exists! The script will update it!")
xmls = Path(input_dir).rglob(pattern)
try:
with open(filelist_path, 'w') as file:
for xml in xmls:
file.write(str(xml) + '\n')
file.close()
except:
logger.warning(f"Impossible to update {filelist_path}! Something going is wrong!")
return filelist_path


@@ -1,96 +0,0 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import glob
import argparse
def parse_arguments():
parser = argparse.ArgumentParser()
skip_config_help = "Paths to folder with skip_config_files"
input_folders_help = "Paths to folders with logs"
extend_file_help = "Extend exiting file"
parser.add_argument("-s", "--skip_config_folders", help=skip_config_help, nargs='*', required=True)
parser.add_argument("-i", "--input_logs", help=input_folders_help, nargs='*', required=True)
parser.add_argument("-e", "--extend_file", help=extend_file_help, default=False, required=False)
return parser.parse_args()
def is_conformance(content: str):
if 'conformance' in content:
return True
return False
def is_hung_test(content: str):
if content == '' or \
"SKIPPED" in content or \
"FAILED" in content or \
"Unexpected application crash!" in content or \
"PASSED" in content:
return False
return True
def get_device_name(content: str):
target_device_str = 'TargetDevice='
pos_start = content.find(target_device_str)
pos_end = content.find('\n')
return f'{content[pos_start + len(target_device_str):pos_end]}'.lower()
def get_regex(content: str):
ir_name_str = 'IR_name='
pos_start = content.find(ir_name_str)
pos_end = content.find('.xml_')
return f'.*{content[pos_start + len(ir_name_str):pos_end]}.*\n'
def get_conformance_hung_test(test_log_dirs: list):
regexp = dict()
for test_log_dir in test_log_dirs:
if not os.path.isdir(test_log_dir):
continue
for log_file in glob.glob(os.path.join(test_log_dir, '*/*')):
with open(log_file) as log:
content = log.read()
if not (is_hung_test(content) and is_conformance(content)):
print(log_file)
continue
device = get_device_name(content)
if 'arm' in content or 'arm' in log_file:
device = 'arm'
if not device in regexp.keys():
regexp.update({device: []})
if get_regex(content) in regexp[device]:
continue
regexp[device].append(get_regex(content))
for device, re_list in regexp.items():
re_list.sort()
return regexp
def save_to_file(skip_folder_paths: list, regexps: dict, extend_file: str):
for skip_folder_path in skip_folder_paths:
if not os.path.isdir(skip_folder_path):
continue
skip_files_paths = glob.glob(os.path.join(skip_folder_path, 'skip_config_*.lst'))
for skip_files_path in skip_files_paths:
for device, re_list in regexps.items():
if device in skip_files_path:
if extend_file:
with open(skip_files_path, 'r') as file:
content = file.readlines()
with open(skip_files_path, 'w') as file:
if extend_file:
file.writelines(content)
file.writelines(re_list)
if __name__ == "__main__":
args = parse_arguments()
save_to_file(args.skip_config_folders, get_conformance_hung_test(args.input_logs), args.extend_file)