Conditional compilation test can run from open source (#5629)

This commit is contained in:
Alexander Shchepetov
2021-05-26 11:43:59 +03:00
committed by GitHub
parent b5c5b6b974
commit d25e149f76
12 changed files with 342 additions and 217 deletions

View File

@@ -0,0 +1,31 @@
# Conditional compilation tests
This folder contains the conditional compilation (CC) test framework code and CC test files.
## Environment preparation:
Install Python modules required for tests:
```bash
pip3 install -r requirements.txt
```
## Run tests
```bash
pytest test_cc.py
```
**Test parameters:**
- `sea_runtool` - path to `sea_runtool.py` file.
- `collector_dir` - path to collector file parent folder.
- `artifacts` - Path to the directory where tests write output or read input.
- `openvino_root_dir` - Path to OpenVINO repo root directory.
**Optional:**
- `test_conf` - path to test cases .yml config.
- `openvino_ref` - Path to the root directory with an installed OpenVINO. If the option is not specified, the CC tests first build and install an
instrumented package at the `<artifacts>/ref_pkg` folder from the OpenVINO repository specified in the `--openvino_root_dir` option.
> If the instrumented OpenVINO package has been successfully installed, you can later set the `--openvino_ref` parameter to `<artifacts>/ref_pkg` for better performance.
**Sample usage:**
```bash
pytest test_cc.py --sea_runtool=./thirdparty/itt_collector/runtool/sea_runtool.py --collector_dir=./bin/intel64/Release --artifacts=../artifacts --openvino_root_dir=.
```

View File

@@ -5,25 +5,24 @@
# pylint: disable=line-too-long
""" Pytest configuration for compilation tests.
Sample usage:
python3 -m pytest --test_conf=<path to test config> \
--sea_runtool=./thirdparty/itt_collector/runtool/sea_runtool.py --artifacts ./compiled test_collect.py \
--collector_dir=./bin/intel64/Release --artifacts=<path to directory where tests write output or read input> \
--openvino_ref=<Path to root directory with installed OpenVINO>
"""
"""Pytest configuration for compilation tests."""
import logging
import sys
import pytest
import yaml
from inspect import getsourcefile
from pathlib import Path
# add ../lib to imports
sys.path.insert(0, str((Path(getsourcefile(lambda: 0)) / ".." / ".." / "lib").resolve(strict=True)))
import yaml
import pytest
from path_utils import expand_env_vars # pylint: disable=import-error
from test_utils import make_build, validate_path_arg, write_session_info, SESSION_INFO_FILE # pylint: disable=import-error
log = logging.getLogger()
def pytest_addoption(parser):
@@ -32,7 +31,7 @@ def pytest_addoption(parser):
"--test_conf",
type=Path,
default=Path(__file__).parent / "test_config.yml",
help="Path to models root directory"
help="Path to models root directory",
)
parser.addoption(
"--sea_runtool",
@@ -56,6 +55,11 @@ def pytest_addoption(parser):
type=Path,
help="Path to root directory with installed OpenVINO",
)
parser.addoption(
"--openvino_root_dir",
type=Path,
help="Path to OpenVINO repository root directory",
)
def pytest_generate_tests(metafunc):
@@ -63,7 +67,7 @@ def pytest_generate_tests(metafunc):
params = []
ids = []
with open(metafunc.config.getoption('test_conf'), "r") as file:
with open(metafunc.config.getoption("test_conf"), "r") as file:
test_cases = yaml.safe_load(file)
for test in test_cases:
@@ -72,7 +76,7 @@ def pytest_generate_tests(metafunc):
if "marks" in test:
extra_args["marks"] = test["marks"]
test_id = model_path.replace('$', '').replace('{', '').replace('}', '')
test_id = model_path.replace("$", "").replace("{", "").replace("}", "")
params.append(pytest.param(test_id, Path(expand_env_vars(model_path)), **extra_args))
ids = ids + [test_id]
metafunc.parametrize("test_id, model", params, ids=ids)
@@ -81,13 +85,19 @@ def pytest_generate_tests(metafunc):
@pytest.fixture(scope="session")
def sea_runtool(request):
"""Fixture function for command-line option."""
return request.config.getoption("sea_runtool")
sea_runtool = request.config.getoption("sea_runtool", skip=True)
validate_path_arg(sea_runtool)
return sea_runtool
@pytest.fixture(scope="session")
def collector_dir(request):
"""Fixture function for command-line option."""
return request.config.getoption("collector_dir")
collector_dir = request.config.getoption("collector_dir", skip=True)
validate_path_arg(collector_dir, is_dir=True)
return collector_dir
@pytest.fixture(scope="session")
@@ -99,4 +109,59 @@ def artifacts(request):
@pytest.fixture(scope="session")
def openvino_root_dir(request):
"""Fixture function for command-line option."""
return request.config.getoption("openvino_ref")
openvino_root_dir = request.config.getoption("openvino_root_dir", skip=True)
validate_path_arg(openvino_root_dir, is_dir=True)
return openvino_root_dir
@pytest.fixture(scope="session")
def openvino_ref(request, artifacts):
    """Fixture function for command-line option.

    Return path to root directory with installed OpenVINO.
    If --openvino_ref command-line option is not specified firstly build and install
    instrumented package with OpenVINO repository specified in --openvino_root_dir option.
    """
    openvino_ref = request.config.getoption("openvino_ref")
    if openvino_ref:
        # Pre-installed package supplied on the command line: just validate and use it.
        validate_path_arg(openvino_ref, is_dir=True)
        return openvino_ref
    # No pre-installed package: fall back to building one from the repository.
    # skip=True makes pytest skip the test instead of failing when the option is absent.
    openvino_root_dir = request.config.getoption("openvino_root_dir", skip=True)
    validate_path_arg(openvino_root_dir, is_dir=True)

    build_dir = openvino_root_dir / "build_instrumented"
    openvino_ref_path = artifacts / "ref_pkg"
    log.info("--openvino_ref is not specified. Preparing instrumented build at %s", build_dir)
    # SELECTIVE_BUILD=COLLECT produces the instrumented package used to gather CC statistics.
    return_code, output = make_build(
        openvino_root_dir,
        build_dir,
        openvino_ref_path,
        cmake_additional_args=["-DSELECTIVE_BUILD=COLLECT"],
        log=log
    )
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
    return openvino_ref_path
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
    """Expose a per-test metadata dict and append it to the session-wide list on teardown."""
    inner_request = request.node._request  # pylint: disable=protected-access
    inner_request.test_info = {}
    if not hasattr(pytestconfig, "session_info"):
        pytestconfig.session_info = []
    yield inner_request.test_info
    # Teardown: record whatever the test stored for later serialization.
    pytestconfig.session_info.append(inner_request.test_info)
@pytest.fixture(scope="session")
def save_session_info(pytestconfig, artifacts):
    """Fixture function for saving additional attributes to configuration file."""
    # Runs nothing at setup; at session teardown dumps everything the `test_info`
    # fixture accumulated in `pytestconfig.session_info` into the artifacts directory.
    yield
    write_session_info(path=artifacts / SESSION_INFO_FILE, data=pytestconfig.session_info)

View File

@@ -0,0 +1,2 @@
[pytest]
addopts = --ignore-unknown-dependency

View File

@@ -0,0 +1 @@
pytest-dependency==0.5.1

View File

@@ -0,0 +1,137 @@
#!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test conditional compilation.
"""
import glob
import logging
import os
import sys
import numpy as np
import pytest
from proc_utils import cmd_exec # pylint: disable=import-error
from test_utils import get_lib_sizes, infer_tool, make_build, run_infer # pylint: disable=import-error
log = logging.getLogger()
@pytest.mark.dependency(name="cc_collect")
def test_cc_collect(test_id, model, openvino_ref, test_info,
                    save_session_info, sea_runtool, collector_dir, artifacts):  # pylint: disable=unused-argument
    """Test conditional compilation statistics collection.

    Runs inference under the ITT collector and expects exactly one per-PID
    statistics CSV to be produced at <artifacts>/<test_id>.pid*.csv.

    :param test_info: custom `test_info` field of built-in `request` pytest fixture.
                      contain a dictionary to store test metadata.
    """
    out = artifacts / test_id
    test_info["test_id"] = test_id
    # cleanup old data if any
    prev_result = glob.glob(f"{out}.pid*.csv")
    for path in prev_result:
        os.remove(path)
    # run use case
    # Use the interpreter of the current environment (venv-aware via sys.prefix).
    sys_executable = (
        os.path.join(sys.prefix, "python.exe")
        if sys.platform == "win32"
        else os.path.join(sys.prefix, "bin", "python")
    )
    return_code, output = cmd_exec(
        [
            sys_executable,
            str(sea_runtool),
            f"--output={out}",
            f"--bindir={collector_dir}",
            # NOTE(review): "!" presumably separates sea_runtool options from the
            # profiled command line that follows — confirm against sea_runtool docs.
            "!",
            sys_executable,
            infer_tool,
            f"-m={model}",
            "-d=CPU",
            f"-r={out}",
        ]
    )
    # sea_runtool appends the collector PID to the output file name, hence the glob.
    out_csv = glob.glob(f"{out}.pid*.csv")
    test_info["out_csv"] = out_csv
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
    assert len(out_csv) == 1, f'Multiple or none "{out}.pid*.csv" files'
# Register an explicit dependency name: downstream tests declare
# depends=["minimized_pkg"], but without name= the default pytest-dependency
# name is the full parametrized node id (e.g. "test_minimized_pkg[model]"),
# which never matches — the suite only works because of
# --ignore-unknown-dependency, losing the intended ordering/skip semantics.
@pytest.mark.dependency(name="minimized_pkg", depends=["cc_collect"])
def test_minimized_pkg(test_id, model, openvino_root_dir, artifacts):  # pylint: disable=unused-argument
    """Build and install OpenVINO package with collected conditional compilation statistics.

    Consumes the single <artifacts>/<test_id>.pid*.csv produced by test_cc_collect
    and feeds it to CMake via -DSELECTIVE_BUILD_STAT to build a minimized package
    installed at <artifacts>/<test_id>/install_pkg.
    """
    out = artifacts / test_id
    install_prefix = out / "install_pkg"
    build_dir = openvino_root_dir / "build_minimized"

    out_csv = glob.glob(f"{out}.pid*.csv")
    assert len(out_csv) == 1, f'Multiple or none "{out}.pid*.csv" files'

    log.info("Building minimized build at %s", build_dir)
    return_code, output = make_build(
        openvino_root_dir,
        build_dir,
        install_prefix,
        cmake_additional_args=[f"-DSELECTIVE_BUILD_STAT={out_csv[0]}"],
        log=log,
    )
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
@pytest.mark.dependency(depends=["cc_collect", "minimized_pkg"])
def test_infer(test_id, model, artifacts):
    """Test inference with conditional compiled binaries."""
    result_dir = artifacts / test_id
    install_prefix = result_dir / "install_pkg"
    # Run the model through the minimized package; results go to <test_id>_cc.npz.
    return_code, output = run_infer(model, f"{result_dir}_cc.npz", install_prefix)
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
@pytest.mark.dependency(depends=["cc_collect", "minimized_pkg"])
def test_verify(test_id, model, openvino_ref, artifacts, tolerance=1e-6):  # pylint: disable=too-many-arguments
    """Test verifying that inference results are equal."""
    result_dir = artifacts / test_id
    minimized_pkg = result_dir / "install_pkg"
    reference_file = f"{result_dir}.npz"
    minimized_file = f"{result_dir}_cc.npz"

    # Run the same model against the reference and the minimized packages.
    for out_file, install_dir in ((reference_file, openvino_ref), (minimized_file, minimized_pkg)):
        return_code, output = run_infer(model, out_file, install_dir)
        assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"

    reference_results = dict(np.load(reference_file))
    inference_results = dict(np.load(minimized_file))
    assert sorted(reference_results) == sorted(inference_results), "Results have different number of layers"
    for layer, reference in reference_results.items():
        # NOTE(review): `tolerance` is passed positionally, i.e. as np.allclose's
        # rtol (relative tolerance) — confirm that absolute tolerance was not intended.
        assert np.allclose(reference, inference_results[layer], tolerance), "Reference and inference results differ"
@pytest.mark.dependency(depends=["cc_collect", "minimized_pkg"])
def test_libs_size(test_id, model, openvino_ref, artifacts):  # pylint: disable=unused-argument
    """Test if libraries haven't increased in size after conditional compilation."""
    libraries = ["inference_engine_transformations", "MKLDNNPlugin", "ngraph"]
    minimized_pkg = artifacts / test_id / "install_pkg"
    ref_libs_size = get_lib_sizes(openvino_ref, libraries)
    lib_sizes = get_lib_sizes(minimized_pkg, libraries)
    res = []
    for lib in libraries:
        lib_size_diff = ref_libs_size[lib] - lib_sizes[lib]
        lib_size_diff_percent = lib_size_diff / ref_libs_size[lib] * 100
        # Report sizes in kB and the saving as an absolute and relative delta.
        log.info(
            "{}: old - {}kB; new - {}kB; diff = {}kB({:.2f}%)".format(
                lib,
                ref_libs_size[lib] / 1024,
                lib_sizes[lib] / 1024,
                lib_size_diff / 1024,
                lib_size_diff_percent,
            )
        )
        if lib_sizes[lib] > ref_libs_size[lib]:
            res.append(lib)
    assert len(res) == 0, f"These libraries: {res} have increased in size!"

View File

@@ -1,69 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test conditional compilation statistics collection.
"""
import glob
import os
import sys
import pytest
from proc_utils import cmd_exec # pylint: disable=import-error
from tests_utils import write_session_info, SESSION_INFO_FILE, infer_tool
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
"""Fixture function for getting the additional attributes of the current test."""
setattr(request.node._request, "test_info", {})
if not hasattr(pytestconfig, "session_info"):
setattr(pytestconfig, "session_info", [])
yield request.node._request.test_info
pytestconfig.session_info.append(request.node._request.test_info)
@pytest.fixture(scope="session")
def save_session_info(pytestconfig, artifacts):
"""Fixture function for saving additional attributes to configuration file."""
yield
write_session_info(path=artifacts / SESSION_INFO_FILE, data=pytestconfig.session_info)
def test_cc_collect(test_id, model, sea_runtool, collector_dir, artifacts, test_info, save_session_info):
""" Test conditional compilation statistics collection
:param test_info: custom `test_info` field of built-in `request` pytest fixture.
contain a dictionary to store test metadata.
"""
out = artifacts / test_id
test_info["test_id"] = test_id
# cleanup old data if any
prev_result = glob.glob(f"{out}.pid*.csv")
for path in prev_result:
os.remove(path)
# run use case
sys_executable = os.path.join(sys.prefix, 'python.exe') if sys.platform == "win32" \
else os.path.join(sys.prefix, 'bin', 'python')
return_code, output = cmd_exec(
[
sys_executable,
str(sea_runtool),
f"--output={out}",
f"--bindir={collector_dir}",
"!",
sys_executable,
infer_tool,
f"-m={model}",
"-d=CPU",
f"-r={out}",
]
)
out_csv = glob.glob(f"{out}.pid*.csv")
test_info["out_csv"] = out_csv
assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
assert (len(out_csv) == 1), f'Multiple or none "{out}.pid*.csv" files'

View File

@@ -1,18 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test inference with conditional compiled binaries.
"""
from tests_utils import run_infer
def test_infer(test_id, model, artifacts):
""" Test inference with conditional compiled binaries
"""
install_prefix = artifacts / test_id / "install_pkg"
out = artifacts / test_id
returncode, output = run_infer(model, f"{out}_cc.npz", install_prefix)
assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"

View File

@@ -1,41 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
import logging
from path_utils import get_lib_path # pylint: disable=import-error
def get_lib_sizes(path, libraries):
"""Function for getting lib sizes by lib names"""
assert Path.exists(path), f'Directory {path} isn\'t created'
result = {}
error_lib = []
for lib in libraries:
try:
result[lib] = Path(path).joinpath(get_lib_path(lib)).stat().st_size
except FileNotFoundError as error:
error_lib.append(str(error))
assert len(error_lib) == 0, 'Following libraries couldn\'t be found: \n{}'.format('\n'.join(error_lib))
return result
def test_size_tracking_libs(openvino_root_dir, test_id, model, artifacts):
log = logging.getLogger('size_tracking')
libraries = ['inference_engine_transformations', 'MKLDNNPlugin', 'ngraph']
ref_libs_size = get_lib_sizes(openvino_root_dir, libraries)
install_prefix = artifacts / test_id / 'install_pkg'
lib_sizes = get_lib_sizes(install_prefix, libraries)
for lib in libraries:
lib_size_diff = ref_libs_size[lib] - lib_sizes[lib]
lib_size_diff_percent = lib_size_diff / ref_libs_size[lib] * 100
log.info('{}: old - {}kB; new - {}kB; diff = {}kB({:.2f}%)'.format(lib,
ref_libs_size[lib] / 1024,
lib_sizes[lib] / 1024,
lib_size_diff / 1024,
lib_size_diff_percent))
res = [lib for lib in libraries if lib_sizes[lib] > ref_libs_size[lib]]
assert len(res) == 0, f'These libraries: {res} have increased in size!'

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Utility functions for work with json test configuration file.
"""
import os
import json
import multiprocessing
import sys
from inspect import getsourcefile
from pathlib import Path
from install_pkg import get_openvino_environment # pylint: disable=import-error
from path_utils import get_lib_path # pylint: disable=import-error
from proc_utils import cmd_exec # pylint: disable=import-error
# File name (under this module's directory by default) for per-session test metadata.
SESSION_INFO_FILE = "cc_tests.json"
# Absolute path to the inference helper script, resolved relative to this file.
infer_tool = str((Path(getsourcefile(lambda: 0)) / ".." / "tools" / "infer_tool.py").resolve())


def validate_path_arg(path: Path, is_dir: bool = False) -> Path:
    """Check if path argument is correct.

    :param path: path supplied on the command line.
    :param is_dir: when True, additionally require that ``path`` is a directory.
    :raises ValueError: if ``path`` does not exist, or is not a directory when required.
    :return: the validated ``path`` (handy for chaining).
    """
    # Fixed docstring opener: was `""""Check...` (four quotes), which left a stray
    # leading `"` in the rendered docstring.
    if not path.exists():
        raise ValueError(f"{path} does not exist.")
    if is_dir and not path.is_dir():
        raise ValueError(f"{path} is not a directory.")
    return path
def get_lib_sizes(path, libraries):
    """Return a dict mapping each library name to its file size in bytes under `path`."""
    assert Path.exists(path), f"Directory {path} isn't created"
    sizes = {}
    missing = []
    for name in libraries:
        lib_file = Path(path).joinpath(get_lib_path(name))
        try:
            sizes[name] = lib_file.stat().st_size
        except FileNotFoundError as error:
            # Collect every missing library so the failure reports all of them at once.
            missing.append(str(error))
    assert not missing, "Following libraries couldn't be found: \n{}".format("\n".join(missing))
    return sizes
def read_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESSION_INFO_FILE):
    """Load session metadata (test ids and artifacts) from the JSON file at `path`."""
    with open(path, "r") as json_file:
        return json.load(json_file)
def write_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESSION_INFO_FILE,
                       data: dict = None):
    """Dump session metadata `data` as indented JSON to `path`, overwriting any previous file."""
    with open(path, "w") as out_file:
        json.dump(data, out_file, indent=4)
def run_infer(model, out_file, install_dir):
    """Run the inference helper script on `model`, writing results to `out_file`.

    The subprocess runs with the OpenVINO environment of `install_dir`.
    Returns the (return_code, output) pair from cmd_exec.
    """
    # Use the interpreter of the current environment (venv-aware via sys.prefix).
    if sys.platform == "win32":
        python_bin = os.path.join(sys.prefix, "python.exe")
    else:
        python_bin = os.path.join(sys.prefix, "bin", "python")
    cmd = [python_bin, infer_tool, "-d=CPU", f"-m={model}", f"-r={out_file}"]
    return cmd_exec(cmd, env=get_openvino_environment(install_dir))
def make_build(openvino_root_dir, build_dir, install_dir, cmake_additional_args=None, log=None):
    """Configure, build and install an OpenVINO package in one shell invocation.

    :param cmake_additional_args: extra `-D...` flags appended to the configure step.
    :return: (return_code, output) from cmd_exec.
    """
    extra = " ".join(cmake_additional_args) + " " if cmake_additional_args else ""
    jobs = multiprocessing.cpu_count()
    configure = (
        "cmake -DENABLE_PROFILING_ITT=ON -DCMAKE_BUILD_TYPE=Release "
        f"-DPYTHON_EXECUTABLE={sys.executable} {extra}"
        f"-S {openvino_root_dir} -B {build_dir}"
    )
    build = f"cmake --build {build_dir} -j{jobs}"
    install = f"cmake --install {build_dir} --prefix {install_dir}"
    # Note: "&&cmake" (no space) is preserved from the original command line;
    # it is still valid shell since "&&" self-delimits as an operator.
    cmd = f"{configure} &&{build} && {install}"
    return cmd_exec([cmd], shell=True, log=log)

View File

@@ -1,29 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test to verify inference results.
"""
import numpy as np
from tests_utils import run_infer
def test_verify(test_id, model, artifacts, openvino_root_dir, tolerance=1e-6): # pylint: disable=too-many-arguments
""" Test verifying that inference results are equal
"""
out = artifacts / test_id
install_prefix = artifacts / test_id / "install_pkg"
out_file = f"{out}.npz"
out_file_cc = f"{out}_cc.npz"
returncode, output = run_infer(model, out_file, openvino_root_dir)
assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"
returncode, output = run_infer(model, out_file_cc, install_prefix)
assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"
reference_results = dict(np.load(out_file))
inference_results = dict(np.load(out_file_cc))
assert sorted(reference_results.keys()) == sorted(inference_results.keys()), \
"Results have different number of layers"
for layer in reference_results.keys():
assert np.allclose(reference_results[layer], inference_results[layer], tolerance), \
"Reference and inference results differ"

View File

@@ -1,43 +0,0 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Utility functions for work with json test configuration file.
"""
import os
import json
import sys
from inspect import getsourcefile
from pathlib import Path
from proc_utils import cmd_exec # pylint: disable=import-error
from install_pkg import get_openvino_environment # pylint: disable=import-error
SESSION_INFO_FILE = "cc_tests.json"
infer_tool = str((Path(getsourcefile(lambda: 0)) / ".." / "tools" / "infer_tool.py").resolve())
def read_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESSION_INFO_FILE):
with open(path, 'r') as json_file:
cc_tests_ids = json.load(json_file)
return cc_tests_ids
def write_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESSION_INFO_FILE,
data: dict = None):
with open(path, "w") as json_file:
json.dump(data, json_file, indent=4)
def run_infer(model, out_file, install_dir):
""" Function running inference
"""
sys_executable = os.path.join(sys.prefix, 'python.exe') if sys.platform == "win32" \
else os.path.join(sys.prefix, 'bin', 'python')
returncode, output = cmd_exec(
[sys_executable,
infer_tool,
"-d=CPU", f"-m={model}", f"-r={out_file}"
],
env=get_openvino_environment(install_dir),
)
return returncode, output

View File

@@ -10,7 +10,7 @@ import logging
import subprocess
def cmd_exec(args, env=None, log=None, verbose=True):
def cmd_exec(args, env=None, log=None, verbose=True, shell=False):
""" Run cmd using subprocess with logging and other improvements
"""
if log is None:
@@ -28,6 +28,7 @@ def cmd_exec(args, env=None, log=None, verbose=True):
stderr=subprocess.STDOUT,
encoding="utf-8",
universal_newlines=True,
shell=shell,
)
output = []
for line in iter(proc.stdout.readline, ""):