[CI] [GHA] Introduce conformance tests (#19841)
* check dirs
* cmake, build, install
* use make
* use build dir
* use target
* add missing
* execute conformance tests
* correct path for requirements
* setupvars
* add API conformance
* conformance as a separate job; install all necessary files
* uncomment
* merge
* install deps
* use matrix, upload expected failures to gh cache
* use 8-core
* use the same paths
* uncomment
* comment
* change
* use csv
* add exit if there are failed tests
* always upload logs
* check dir
* use another dir for expected_failures
* upload always
* rm
* new key
* rm unused
* change
* update
* update
* rm unused
* do not exit if update is set
* copy file
* do not fail in failures check
* use specific branch
* run clean
* add expected failures
* uncomment
* comment out
* correctly add failed tests to fix_priority; check for unexpected failures in case of an update
* use azure runners for conformance
* use 4-core
* uncomment
* comment out
* split deps installation
* add missing deps for setup-python
* print error
* install certs
* do not verify ssl
* add ca-certificates install
* uncomment
* comment
* pack artifacts, rm unused deps
* rm unused dep
* always pack artifacts
* rm for func tests
* use less cores
* use var
* store int
* do not exit if non-zero code
* only 4 core
* use gh runner
* use sudo
* add missing sudo
* use expected failures
* uncomment
* comment
* create fresh expected failures
* use expected failures
* use specific ref and repo
* use expected failures
* uncomment
* comment out
* check caches
* fix str
* rm unused
* restore by restore key
* create dir
* use array
* use diff path
* mv after each download
* add sleeping and more logs
* add saving of hash table
* change
* uncomment
* comment
* download cache entries
* check dir
* use better dir
* uncomment
* rm unused
* add skip configs
* update lists
* rm unused dir; add docs
* rm unused
* rm hardcoded repo ref
* rm unused options; use better name for job
* rm unnecessary dir creation
This commit is contained in:
parent b20afe850e
commit 4ad68e8ff2

.github/workflows/linux.yml (vendored, 105 changed lines)
@@ -10,16 +10,12 @@ on:
       - 'docs/**'
       - '**/**.md'
       - '**.md'
-      - '**/layer_tests_summary/**'
-      - '**/conformance/**'
   push:
     paths-ignore:
       - '**/docs/**'
       - 'docs/**'
       - '**/**.md'
       - '**.md'
-      - '**/layer_tests_summary/**'
-      - '**/conformance/**'
     branches:
       - master
       - 'releases/**'
@@ -384,6 +380,101 @@ jobs:
           path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
           if-no-files-found: 'error'
+
+  Conformance:
+    needs: Build
+    defaults:
+      run:
+        shell: bash
+    runs-on: ubuntu-20.04-8-cores
+    strategy:
+      max-parallel: 2
+      fail-fast: false
+      matrix:
+        include:
+          # 'OP' for Opset, 'API' for API
+          - TEST_TYPE: 'OP'
+          - TEST_TYPE: 'API'
+    env:
+      DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting for user input
+      INSTALL_DIR: ${{ github.workspace }}/install
+      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
+      CONFORMANCE_TOOLS_DIR: ${{ github.workspace }}/install/tests/functional_test_utils/layer_tests_summary
+      CONFORMANCE_ARTIFACTS_DIR: ${{ github.workspace }}/install/conformance_artifacts
+      TEST_DEVICE: 'CPU'
+
+    steps:
+      - name: Create Directories
+        run: |
+          mkdir -p ${CONFORMANCE_ARTIFACTS_DIR}
+
+      #
+      # Dependencies
+      #
+
+      - name: Download OpenVINO package
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_package
+          path: ${{ env.INSTALL_DIR }}
+
+      - name: Download OpenVINO tests package
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_tests
+          path: ${{ env.INSTALL_TEST_DIR }}
+
+      - name: Extract OpenVINO packages
+        run: |
+          pushd ${INSTALL_DIR}
+            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
+          popd
+          pushd ${INSTALL_TEST_DIR}
+            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
+          popd
+
+      - uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install Dependencies
+        run: |
+          sudo -E ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y
+
+          # Needed for downloading IRs from storage.openvinotoolkit with Python urllib
+          sudo apt-get update && sudo apt-get install --assume-yes --no-install-recommends ca-certificates
+
+          python3 -m pip install -r ${CONFORMANCE_TOOLS_DIR}/requirements.txt
+
+      #
+      # Tests
+      #
+
+      - name: Conformance Tests
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+
+          python3 ${CONFORMANCE_TOOLS_DIR}/run_conformance.py -ov=${INSTALL_DIR}/tests \
+            -d=${TEST_DEVICE} \
+            -t=${{ matrix.TEST_TYPE }} \
+            -w=${CONFORMANCE_ARTIFACTS_DIR} \
+            -f=${CONFORMANCE_TOOLS_DIR}/skip_configs/${TEST_DEVICE}/expected_failures_${{ matrix.TEST_TYPE }}.csv
+
+      - name: Pack Conformance Artifacts
+        if: ${{ always() }}
+        run: |
+          pushd ${CONFORMANCE_ARTIFACTS_DIR}
+            tar -czvf ${CONFORMANCE_ARTIFACTS_DIR}/conformance_artifacts.tar.gz *
+          popd
+
+      - name: Upload Conformance Artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: conformance_artifacts_${{ matrix.TEST_TYPE }}-${{ env.TEST_DEVICE }}
+          path: ${{ env.CONFORMANCE_ARTIFACTS_DIR }}/conformance_artifacts.tar.gz
+          if-no-files-found: 'error'
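For local debugging, the Conformance Tests step above can be reproduced outside CI. A minimal sketch that mirrors the job's invocation, assuming an already extracted OpenVINO package (the install path and device are placeholders):

```
INSTALL_DIR=/path/to/extracted/openvino        # placeholder install location
TOOLS_DIR=$INSTALL_DIR/tests/functional_test_utils/layer_tests_summary

source $INSTALL_DIR/setupvars.sh
python3 $TOOLS_DIR/run_conformance.py -ov=$INSTALL_DIR/tests \
    -d=CPU \
    -t=OP \
    -w=$INSTALL_DIR/conformance_artifacts \
    -f=$TOOLS_DIR/skip_configs/CPU/expected_failures_OP.csv
```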

   ONNX_Runtime:
     needs: Build
     defaults:
@@ -735,7 +826,7 @@ jobs:
           path: 'openvino'

       #
-      # Initilaize OpenVINO
+      # Initialize OpenVINO
       #

       - uses: actions/setup-python@v4
@@ -938,7 +1029,7 @@ jobs:
     env:
       INSTALL_DIR: /__w/openvino/openvino/install
       INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
-      PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/functional_test_utils/run_parallel.py
+      PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py
       PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst

     steps:
@@ -977,7 +1068,7 @@ jobs:
           python-version: ${{ env.PYTHON_VERSION }}

       - name: Install python dependencies for run_parallel.py
-        run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/requirements.txt
+        run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt

       - name: Restore tests execution time
         uses: actions/cache/restore@v3

src/tests/functional/plugin/conformance/test_runner/README.md
@@ -50,7 +50,7 @@ Run the following commands in the build directory:
 make --jobs=$(nproc --all) lib_plugin_name
 ```

-## How to run using [simple conformance runner](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/run_conformance.py)
+## How to run using [simple conformance runner](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py)

There is a simple Python runner that completes the whole conformance pipeline locally. Individual steps can be excluded from the pipeline via command-line parameters; a minimal invocation is sketched below.
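A minimal sketch of a local run (the device, conformance type, and working directory are illustrative; `-ov` should point at the directory containing the built conformance binaries):

```
# run Opset conformance on CPU, keeping intermediate artifacts in /tmp/conf_work
python3 run_conformance.py -ov=/path/to/install/tests -d=CPU -t=OP -w=/tmp/conf_work
```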
@@ -89,7 +89,7 @@ The script has the following optional arguments:
 * `p PARALLEL_DEVICES, --parallel_devices PARALLEL_DEVICES`
             Parallel over HW devices. For example, run tests over `GPU.0` and `GPU.1` when the devices are identical
 * `f EXPECTED_FAILURES, --expected_failures EXPECTED_FAILURES`
-            Expected failures list file path as csv
+            Expected failures list file path as csv. See more in the [Working with expected failures](#working-with-expected-failures) section.
 * `u EXPECTED_FAILURES_UPDATE, --expected_failures_update EXPECTED_FAILURES_UPDATE`
             Overwrite the expected failures list in case the same failures were fixed
 * `-cache_path CACHE_PATH`
@@ -155,7 +155,7 @@ The target is able to take the following command-line arguments:

 > **NOTE**:
 >
-> Using [`parallel_runner`](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/run_parallel.py) tool to run a conformance suite helps to report crashed tests and collect correct statistics after unexpected crashes.
+> Using the [`parallel_runner`](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py) tool to run a conformance suite helps to report crashed tests and collect correct statistics after unexpected crashes.
 > The tool is able to work in two modes:
 > * one test is run per worker thread (first run; a cache is saved as a custom file as the output).
 > * similar load time per worker, based on test execution time; workers may receive different test counts.
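A rough sketch of a first (cache-building) run follows. The exact option names are not listed in this README, so treat `--exec_file` and `--cache_path` as assumptions and verify them with `run_parallel.py --help`:

```
# first run: each test executes in its own worker; the timing cache is written out
python3 run_parallel.py --exec_file=/path/to/conformanceTests \
    --cache_path=/tmp/test_cache.lst -- --device=CPU
```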
@@ -169,16 +169,26 @@ The target is able to take the following command-line arguments:
 > All arguments after the `--` symbol are forwarded to the `conformanceTests` target.
 >
 > If you use the `--report_unique_name` argument, run
-> [the merge xml script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py)
+> [the merge xml script](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/merge_xmls.py)
 > to aggregate the results into one *xml* file. Check command-line arguments with `--help` before running the command.
 > An example of usage is:
 > ```
 > python3 merge_xmls.py --input_folders=/path/to/temp_output_report_folder --output_folder=/path/to/output_report_folder --output_filename=report_aggregated
 > ```
+
+## Working with expected failures
+
+The `run_conformance.py` script has an optional `--expected_failures` argument which accepts a path to a csv file with a list of tests that should not be run.
+
+You can find the files with the most up-to-date expected failures for different devices and conformance types [here](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/skip_configs).
+
+These files are used in [the Linux GitHub workflow](./../../../../../../.github/workflows/linux.yml) to skip tests.
+
+You can update the file(s) you need with either newly passing tests (when something is fixed) or with newly failing tests that should be skipped. The changes are reflected in the GitHub Actions pipeline, in the `Conformance_Tests` job; both flows are sketched below.
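A sketch of both flows using the documented `-f`/`-u` options (paths are illustrative):

```
# skip the known failures recorded in the CSV
python3 run_conformance.py -d=CPU -t=OP -f=skip_configs/CPU/expected_failures_OP.csv

# after fixes land, refresh the CSV with -u; the runner only rewrites the file
# when the run produced no unexpected (new) failures
python3 run_conformance.py -d=CPU -t=OP -f=skip_configs/CPU/expected_failures_OP.csv -u
```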

## How to create a conformance report

-Run [the summarize script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate `html` and `csv` report. Check command-line arguments with `--help` before running the command.
+Run [the summarize script](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate the `html` and `csv` reports. Check command-line arguments with `--help` before running the command.
 An example of using the script is:
 ```
 python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report_opset.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ -t OP
@@ -186,7 +196,7 @@ python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report_opset.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ -t OP
 ```
 ```
 python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report_api.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ -t API
 ```
-> **NOTE**: Remember to copy [styles folder](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/template) to the output directory. It helps to provide a report with filters and other useful features.
+> **NOTE**: Remember to copy the [styles folder](./../../../../../tests/test_utils/functional_test_utils/layer_tests_summary/template) to the output directory. It provides the report with filters and other useful features.

 The report contains statistics based on the conformance results, with filter fields at the top of the page.

src/tests/test_utils/functional_test_utils/CMakeLists.txt
@@ -29,9 +29,7 @@ addIeTarget(
         $<TARGET_PROPERTY:openvino::runtime::dev,INTERFACE_INCLUDE_DIRECTORIES>
 )

-install(PROGRAMS layer_tests_summary/run_parallel.py DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL)
-install(FILES layer_tests_summary/requirements.txt DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL)
-install(DIRECTORY layer_tests_summary/utils DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL)
+install(DIRECTORY layer_tests_summary DESTINATION tests/functional_test_utils COMPONENT tests EXCLUDE_FROM_ALL)

 ov_build_target_faster(${TARGET_NAME}
     PCH PRIVATE "src/precomp.hpp"
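With the single `install(DIRECTORY ...)` rule, the whole `layer_tests_summary` folder (runner scripts, `requirements.txt`, `utils`, skip configs) ships with the `tests` component. Because the rule is `EXCLUDE_FROM_ALL`, the component must be installed explicitly; a sketch, assuming a CMake build directory named `build`:

```
# install only the 'tests' component into a chosen prefix
cmake --install build --component tests --prefix /path/to/install
# the parallel runner then lands at
#   /path/to/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py
```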

src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
@@ -1,23 +1,23 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-import csv
-import os
-import urllib.request as ur
 from argparse import ArgumentParser
-from subprocess import Popen
-from shutil import copytree, rmtree
-from summarize import create_summary, create_api_summary
-from merge_xmls import merge_xml
-from run_parallel import TestParallelRunner
 from pathlib import Path
-
-import defusedxml.ElementTree as ET
+from shutil import copytree, rmtree, copyfile
+from subprocess import Popen
+from urllib.parse import urlparse
+
+import os
+import csv
+import urllib.request as ur
+import defusedxml.ElementTree as ET
+
+from merge_xmls import merge_xml
+from run_parallel import TestParallelRunner
+from summarize import create_summary, create_api_summary
 from utils import constants
-from utils.conformance_utils import get_logger
 from utils import file_utils
+from utils.conformance_utils import get_logger

 logger = get_logger('conformance_runner')
 has_python_api = True
@@ -34,10 +34,12 @@ SUBGRAPH_DUMPER_BIN_NAME = "subgraphsDumper"
 SCRIPT_DIR_PATH, SCRIPT_NAME = os.path.split(os.path.abspath(__file__))
 NO_MODEL_CONSTANT = os.path.join(SCRIPT_DIR_PATH, "data", "models.lst")

+
 def get_default_working_dir():
     path = Path(__file__).parent.resolve()
     return os.path.join(path, "temp")

+
 def parse_arguments():
     parser = ArgumentParser()

@@ -56,27 +58,31 @@ def parse_arguments():
     cache_path_help = "Path to the cache file with test_name list sorted by execution time as `.lst` file!"
     expected_failures_update_help = "Overwrite expected failures list in case same failures were fixed"

-    parser.add_argument("-d", "--device", help= device_help, type=str, required=False, default="CPU")
+    parser.add_argument("-d", "--device", help=device_help, type=str, required=False, default="CPU")
     parser.add_argument("-t", "--type", help=type_help, type=str, required=False, default=constants.OP_CONFORMANCE)
     parser.add_argument("--gtest_filter", help=gtest_filter_helper, type=str, required=False, default="*")
-    parser.add_argument("-w", "--working_dir", help=working_dir_help, type=str, required=False, default=get_default_working_dir())
-    parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False, default=NO_MODEL_CONSTANT)
+    parser.add_argument("-w", "--working_dir", help=working_dir_help, type=str, required=False,
+                        default=get_default_working_dir())
+    parser.add_argument("-m", "--models_path", help=models_path_help, type=str, required=False,
+                        default=NO_MODEL_CONSTANT)
     parser.add_argument("-ov", "--ov_path", help=ov_help, type=str, required=False, default="")
-    parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count()-1)
+    parser.add_argument("-j", "--workers", help=workers_help, type=int, required=False, default=os.cpu_count() - 1)
     parser.add_argument("-c", "--ov_config_path", help=ov_config_path_helper, type=str, required=False, default="")
     parser.add_argument("-s", "--dump_graph", help=dump_graph_help, type=int, required=False, default=0)
     parser.add_argument("-sm", "--special_mode", help=special_mode_help, type=str, required=False, default="")
     parser.add_argument("-p", "--parallel_devices", help=parallel_help, type=bool, required=False, default=False)
     parser.add_argument("-f", "--expected_failures", help=expected_failures_help, type=str, required=False, default="")
-    parser.add_argument("-u", "--expected_failures_update", help=expected_failures_update_help, type=bool, required=False, default=False)
+    parser.add_argument("-u", "--expected_failures_update", help=expected_failures_update_help, required=False,
+                        default=False, action='store_true')
    parser.add_argument("--cache_path", help=cache_path_help, type=str, required=False, default="")

     return parser.parse_args()
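The `-u` change above is a behavioural fix, not just a reflow: with `type=bool`, argparse calls `bool()` on the raw string, and any non-empty string (including `"False"`) is truthy, so the option could never be switched off from the command line. With `action='store_true'`, the mere presence of `-u` enables the update. An illustrative shell session (hypothetical usage, not taken from the PR):

```
# old declaration (type=bool): the value string is always truthy
python3 run_conformance.py -u False   # update still ran

# new declaration (action='store_true'): presence toggles the flag
python3 run_conformance.py -u         # update enabled
python3 run_conformance.py            # update disabled (default)
```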

 class Conformance:
-    def __init__(self, device:str, model_path:os.path, ov_path:os.path, type:str, workers:int,
-                 gtest_filter:str, working_dir:os.path, ov_config_path:os.path, special_mode:str,
-                 cache_path:str, parallel_devices:bool, expected_failures_file: str,
+    def __init__(self, device: str, model_path: os.path, ov_path: os.path, type: str, workers: int,
+                 gtest_filter: str, working_dir: os.path, ov_config_path: os.path, special_mode: str,
+                 cache_path: str, parallel_devices: bool, expected_failures_file: str,
                  expected_failures_update: bool):
         self._device = device
         self._model_path = model_path
@@ -107,7 +113,8 @@ class Conformance:
             logger.error(f'Incorrect value to set API scope: {special_mode}. Please check to get possible values')
             exit(-1)
         else:
-            logger.error(f"Incorrect conformance type: {type}. Please use '{constants.OP_CONFORMANCE}' or '{constants.API_CONFORMANCE}'")
+            logger.error(
+                f"Incorrect conformance type: {type}. Please use '{constants.OP_CONFORMANCE}' or '{constants.API_CONFORMANCE}'")
             exit(-1)
         self._type = type
         self._workers = workers
@@ -117,6 +124,7 @@ class Conformance:
         self._ov_config_path = ov_config_path
         self._is_parallel_over_devices = parallel_devices
         self._expected_failures = set()
+        self._unexpected_failures = set()
         self._expected_failures_file = expected_failures_file
         if os.path.isfile(expected_failures_file):
             self._expected_failures = self.__get_failed_test_from_csv(expected_failures_file)
@@ -124,25 +132,27 @@ class Conformance:
             logger.warning(f"Expected failures testlist `{self._expected_failures_file}` does not exist!")
         self._expected_failures_update = expected_failures_update

+        self.is_successful_run = False
+
     def __download_models(self, url_to_download, path_to_save):
         _, file_name = os.path.split(urlparse(url_to_download).path)
         download_path = os.path.join(path_to_save, file_name)
         try:
             logger.info(f"Conformance IRs will be downloaded from {url_to_download} to {download_path}")
             ur.urlretrieve(url_to_download, filename=download_path)
-        except:
-            logger.error(f"Please verify URL: {url_to_download}. Looks like that is incorrect")
+        except Exception as exc:
+            logger.error(f"Please verify URL: {url_to_download}. It might be incorrect. See below for the full error.")
+            logger.exception(f'FULL ERROR: {exc}')
             exit(-1)
         logger.info(f"Conformance IRs were downloaded from {url_to_download} to {download_path}")
         if not os.path.isfile(download_path):
             logger.error(f"{download_path} is not a file. Exit!")
             exit(-1)
         if file_utils.is_archieve(download_path):
-            logger.info(f"The file {download_path} is archieve. Should be unzip to {path_to_save}")
+            logger.info(f"The file {download_path} is archived. Should be unzipped to {path_to_save}")
             return file_utils.unzip_archieve(download_path, path_to_save)
         return download_path

     def __dump_subgraph(self):
         subgraph_dumper_path = os.path.join(self._ov_path, f'{SUBGRAPH_DUMPER_BIN_NAME}{constants.OS_BIN_FILE_EXT}')
         if not os.path.isfile(subgraph_dumper_path):
@@ -153,7 +163,9 @@ class Conformance:
             logger.info(f"Remove directory {conformance_ir_path}")
             rmtree(conformance_ir_path)
         os.mkdir(conformance_ir_path)
-        self._model_path = file_utils.prepare_filelist(self._model_path, ["*.onnx", "*.pdmodel", "*.__model__", "*.pb", "*.xml", "*.tflite"])
+        self._model_path = file_utils.prepare_filelist(self._model_path,
+                                                       ["*.onnx", "*.pdmodel", "*.__model__", "*.pb", "*.xml",
+                                                        "*.tflite"])
         logger.info(f"Starting model dumping from {self._model_path}")
         cmd = f'{subgraph_dumper_path} --input_folders="{self._model_path}" --output_folder="{conformance_ir_path}"'
         process = Popen(cmd, shell=True)
@@ -172,11 +184,12 @@ class Conformance:
             save_rel_weights(Path(self._model_path), op_rel_weight)
             logger.info(f"All conformance IRs in {self._model_path} were renamed based on hash")
         else:
-            logger.warning("The OV Python was not built or Environment was not updated to requirments. Skip the step to rename Conformance IR based on a hash")
+            logger.warning(
+                "The OV Python was not built or Environment was not updated to requirements. "
+                "Skip the step to rename Conformance IR based on a hash")

     @staticmethod
-    def __get_failed_test_from_csv(csv_file:str):
+    def __get_failed_test_from_csv(csv_file: str):
         failures = set()
         with open(csv_file, "r") as failures_file:
             for row in csv.reader(failures_file, delimiter=','):
@@ -195,14 +208,20 @@ class Conformance:
         diff = this_run_failures.difference(self._expected_failures)
         if len(diff) > 0:
             logger.error(f"Unexpected failures: {diff}")
-            exit(-1)
+            self._unexpected_failures = diff
+            self.is_successful_run = False

         intersection = self._expected_failures.intersection(this_run_failures)
-        if this_run_failures != self._expected_failures and self._expected_failures_update:
-            logger.info(f"Expected failures file {self._expected_failures} will be updated!!!")
+        # we do not want to update the expected failures file if there are failures that were not present
+        # in the passed expected failures file, i.e. if len(self._unexpected_failures) > 0
+        if this_run_failures != self._expected_failures and self._expected_failures_update and \
+                not len(self._unexpected_failures):
+            logger.info(f"Expected failures file {self._expected_failures_file} will be updated! "
+                        f"The following will be deleted as they are passing now: "
+                        f"{self._expected_failures.difference(this_failures_file)}")
             os.remove(self._expected_failures_file)
-            this_failures_file = Path(this_failures_file)
-            this_failures_file.rename(self._expected_failures_file)
+            copyfile(this_failures_file, self._expected_failures_file)
+
+        self.is_successful_run = True

     def __run_conformance(self):
         conformance_path = None
@@ -212,7 +231,7 @@ class Conformance:
             conformance_path = os.path.join(self._ov_path, f'{API_CONFORMANCE_BIN_NAME}{constants.OS_BIN_FILE_EXT}')

         if not os.path.isfile(conformance_path):
-            logger.error(f"{conformance_path} is not exist!")
+            logger.error(f"{conformance_path} does not exist!")
             exit(-1)

         logs_dir = os.path.join(self._working_dir, f'{self._device}_logs')
@@ -241,7 +260,7 @@ class Conformance:
                                          is_parallel_devices=self._is_parallel_over_devices,
                                          excluded_tests=self._expected_failures if not self._expected_failures_update else set())
         conformance.run()
-        conformance.postprocess_logs()
+        self.is_successful_run = conformance.postprocess_logs()

         if os.path.isfile(self._expected_failures_file):
             self.__check_expected_failures()
@@ -249,13 +268,15 @@ class Conformance:
         final_report_name = f'report_{self._type.lower()}'
         merge_xml([parallel_report_dir], report_dir, final_report_name, self._type, True)

-        logger.info(f"Conformance is successful. XML report was saved to {report_dir}")
-        return (os.path.join(report_dir, final_report_name + ".xml"), report_dir)
+        logger.info(f"XML report was saved to {report_dir}")
+        return os.path.join(report_dir, final_report_name + ".xml"), report_dir

-    def __summarize(self, xml_report_path:os.path, report_dir: os.path):
+    def __summarize(self, xml_report_path: os.path, report_dir: os.path):
         if self._type == constants.OP_CONFORMANCE:
             summary_root = ET.parse(xml_report_path).getroot()
-            rel_weights_path = os.path.join(self._model_path, constants.REL_WEIGHTS_FILENAME.replace(constants.REL_WEIGHTS_REPLACE_STR, self._special_mode))
+            rel_weights_path = os.path.join(self._model_path,
+                                            constants.REL_WEIGHTS_FILENAME.replace(constants.REL_WEIGHTS_REPLACE_STR,
+                                                                                   self._special_mode))
             create_summary(summary_root, report_dir, [], "", "", True, True, rel_weights_path)
         else:
             create_api_summary([xml_report_path], report_dir, [], "", "")
@@ -303,7 +324,7 @@ class Conformance:
         if dump_models:
             self.__dump_subgraph()
         if not os.path.exists(self._model_path):
-            logger.error(f"The model direstory {self._model_path} does not exist!")
+            logger.error(f"The model directory {self._model_path} does not exist!")
             exit(-1)
         if not os.path.exists(self._model_path):
             logger.error(f"Directory {self._model_path} does not exist")
@@ -311,6 +332,7 @@ class Conformance:
         xml_report, report_dir = self.__run_conformance()
         self.__summarize(xml_report, report_dir)

+
 if __name__ == "__main__":
     args = parse_arguments()
     conformance = Conformance(args.device, args.models_path,
@@ -321,4 +343,5 @@ if __name__ == "__main__":
                               args.parallel_devices, args.expected_failures,
                               args.expected_failures_update)
     conformance.run(args.dump_graph)
+
+    if not conformance.is_successful_run:
+        exit(-1)

src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
@@ -331,7 +331,7 @@ class TestParallelRunner:
             if constants.DISABLED_PREFIX in real_test_name:
                 self._disabled_tests.append(real_test_name)
             elif test_unit == constants.TEST_UNIT_NAME:
-                tests_dict[real_test_name] = 1
+                tests_dict[real_test_name] = -1
                 self._total_test_cnt += 1
             elif test_unit == constants.SUITE_UNIT_NAME:
                 tests_dict[test_suite] = tests_dict.get(test_suite, 0) + 1
@@ -643,8 +643,8 @@ class TestParallelRunner:
                         test_results[dir] += 1
                     else:
                         test_results[dir] = 1
-                    if dir != "passed" and ref_k != None:
-                        fix_priority.append((ref_k, test_name))
+                    if dir != "passed":
+                        fix_priority.append((ref_k or 0, test_name))
                     ref_k = None
                     test_cnt_real_saved_now += 1
                     test_name = None

(Two further file diffs were suppressed because they are too large.)