Added test validating inference results after conditional compilation (#4840)

This commit is contained in:
Olesya Martinyuk 2021-04-13 22:16:14 +03:00 committed by GitHub
parent 0fcb6d0464
commit 070201feee
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 87 additions and 55 deletions

View File

@@ -8,20 +8,18 @@
""" Pytest configuration for compilation tests.
Sample usage:
python3 -m pytest --artifacts ./compiled --test_conf=<path to test config> \
--sea_runtool=./IntelSEAPI/runtool/sea_runtool.py \
--benchmark_app=./bin/benchmark_app test_collect.py
python3 -m pytest --test_conf=<path to test config> \
--sea_runtool=./thirdparty/itt_collector/runtool/sea_runtool.py --artifacts ./compiled test_collect.py \
--collector_dir=./bin/intel64/Release --artifacts=<path to directory where tests write output or read input> \
--openvino_ref=<Path to root directory with installed OpenVINO>
"""
import sys
import pytest
import yaml
from inspect import getsourcefile
from pathlib import Path
from tests_utils import write_session_info, SESSION_INFO_FILE
# add ../lib to imports
sys.path.insert(0, str((Path(getsourcefile(lambda: 0)) / ".." / ".." / "lib").resolve(strict=True)))
@@ -41,11 +39,6 @@ def pytest_addoption(parser):
type=Path,
help="Path to sea_runtool.py"
)
parser.addoption(
"--benchmark_app",
type=Path,
help="Path to the benchmark_app tool",
)
parser.addoption(
"--collector_dir",
type=Path,
@@ -85,37 +78,12 @@ def pytest_generate_tests(metafunc):
metafunc.parametrize("test_id, model", params, ids=ids)
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
    """Expose a per-test metadata dictionary and collect it at teardown.

    The dictionary is attached to the test's `request` object so the test body
    can populate it; once the test finishes it is appended to the session-wide
    `pytestconfig.session_info` list.
    """
    request.node._request.test_info = {}
    if not hasattr(pytestconfig, "session_info"):
        pytestconfig.session_info = []
    yield request.node._request.test_info
    pytestconfig.session_info.append(request.node._request.test_info)
@pytest.fixture(scope="session", autouse=True)
def save_session_info(pytestconfig, artifacts):
    """At session teardown, dump all collected test metadata to the session info file."""
    yield
    session_data = pytestconfig.session_info
    write_session_info(path=artifacts / SESSION_INFO_FILE, data=session_data)
@pytest.fixture(scope="session")
def sea_runtool(request):
    """Return the value of the `--sea_runtool` command-line option."""
    option_value = request.config.getoption("sea_runtool")
    return option_value
@pytest.fixture(scope="session")
def benchmark_app(request):
    """Return the value of the `--benchmark_app` command-line option."""
    option_value = request.config.getoption("benchmark_app")
    return option_value
@pytest.fixture(scope="session")
def collector_dir(request):
"""Fixture function for command-line option."""

View File

@@ -9,11 +9,32 @@
import glob
import os
import sys
import pytest
from proc_utils import cmd_exec # pylint: disable=import-error
from tests_utils import write_session_info, SESSION_INFO_FILE, infer_tool
def test_cc_collect(test_id, model, sea_runtool, benchmark_app, collector_dir, artifacts, test_info):
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
    """Provide a dictionary for per-test metadata and record it in the session info.

    The same dictionary object is attached to `request.node._request`, yielded to
    the test, and finally appended to `pytestconfig.session_info`.
    """
    info = {}
    request.node._request.test_info = info
    if not hasattr(pytestconfig, "session_info"):
        pytestconfig.session_info = []
    yield info
    pytestconfig.session_info.append(info)
@pytest.fixture(scope="session")
def save_session_info(pytestconfig, artifacts):
    """After the session finishes, persist collected test metadata to SESSION_INFO_FILE."""
    yield
    output_path = artifacts / SESSION_INFO_FILE
    write_session_info(path=output_path, data=pytestconfig.session_info)
def test_cc_collect(test_id, model, sea_runtool, collector_dir, artifacts, test_info, save_session_info):
""" Test conditional compilation statistics collection
:param test_info: custom `test_info` field of built-in `request` pytest fixture.
contain a dictionary to store test metadata.
@@ -25,19 +46,20 @@ def test_cc_collect(test_id, model, sea_runtool, benchmark_app, collector_dir, a
for path in prev_result:
os.remove(path)
# run use case
sys_executable = os.path.join(sys.prefix, 'python.exe') if sys.platform == "win32" \
else os.path.join(sys.prefix, 'bin', 'python')
return_code, output = cmd_exec(
[
sys.executable,
sys_executable,
str(sea_runtool),
f"--output={out}",
f"--bindir={collector_dir}",
"--app_status",
"!",
str(benchmark_app),
"-d=CPU",
sys_executable,
infer_tool,
f"-m={model}",
"-niter=1",
"-nireq=1",
"-d=CPU",
f"-r={out}",
]
)
out_csv = glob.glob(f"{out}.pid*.csv")

View File

@@ -5,19 +5,14 @@
""" Test inference with conditional compiled binaries.
"""
import sys
from proc_utils import cmd_exec # pylint: disable=import-error
from install_pkg import get_openvino_environment # pylint: disable=import-error
from tests_utils import run_infer
def test_infer(test_id, model, artifacts):
""" Test inference with conditional compiled binaries
"""
install_prefix = artifacts / test_id / "install_pkg"
exe_suffix = ".exe" if sys.platform == "win32" else ""
benchmark_app = install_prefix / "bin" / f"benchmark_app{exe_suffix}"
returncode, _ = cmd_exec(
[str(benchmark_app), "-d=CPU", f"-m={model}", "-niter=1", "-nireq=1"],
env=get_openvino_environment(install_prefix),
)
assert returncode == 0, f"Command exited with non-zero status {returncode}"
out = artifacts / test_id
returncode, output = run_infer(model, f"{out}_cc.npz", install_prefix)
assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test to verify inference results.
"""
import numpy as np
from tests_utils import run_infer
def test_verify(test_id, model, artifacts, openvino_root_dir, tolerance=1e-6):  # pylint: disable=too-many-arguments
    """ Test verifying that inference results of the reference OpenVINO package
    and the conditionally compiled package are numerically equal.

    :param tolerance: relative tolerance (rtol) for the element-wise comparison
                      of per-layer outputs.
    """
    out = artifacts / test_id
    install_prefix = artifacts / test_id / "install_pkg"
    out_file = f"{out}.npz"
    out_file_cc = f"{out}_cc.npz"
    # Reference run against the full OpenVINO build.
    returncode, output = run_infer(model, out_file, openvino_root_dir)
    assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"
    # Run under test against the conditionally compiled package.
    returncode, output = run_infer(model, out_file_cc, install_prefix)
    assert returncode == 0, f"Command exited with non-zero status {returncode}:\n {output}"
    reference_results = dict(np.load(out_file))
    inference_results = dict(np.load(out_file_cc))
    # Compare the sets of output layer names, not just their count.
    assert sorted(reference_results.keys()) == sorted(inference_results.keys()), \
        "Results have different sets of output layers"
    for layer in reference_results.keys():
        # Pass the tolerance explicitly as rtol: the third positional argument of
        # np.allclose is rtol, which is easy to misread as an absolute tolerance.
        assert np.allclose(reference_results[layer], inference_results[layer], rtol=tolerance), \
            f"Reference and inference results differ for layer '{layer}'"

View File

@@ -4,13 +4,16 @@
""" Utility functions for work with json test configuration file.
"""
import os
import json
import sys
from inspect import getsourcefile
from pathlib import Path
from proc_utils import cmd_exec # pylint: disable=import-error
from install_pkg import get_openvino_environment # pylint: disable=import-error
SESSION_INFO_FILE = "cc_tests.json"
infer_tool = str((Path(getsourcefile(lambda: 0)) / ".." / "tools" / "infer_tool.py").resolve())
def read_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESSION_INFO_FILE):
@@ -23,3 +26,18 @@ def write_session_info(path: Path = Path(getsourcefile(lambda: 0)).parent / SESS
data: dict = None):
with open(path, "w") as json_file:
json.dump(data, json_file, indent=4)
def run_infer(model, out_file, install_dir):
    """ Run the inference tool for the given model on CPU and save results to *out_file*.

    The child process runs with the OpenVINO environment taken from *install_dir*
    and returns the (returncode, output) pair from cmd_exec.
    """
    # NOTE(review): the interpreter path is rebuilt from sys.prefix rather than
    # using sys.executable — presumably deliberate; confirm it resolves correctly
    # in virtual environments on Windows (python.exe may live under Scripts\).
    if sys.platform == "win32":
        python_bin = os.path.join(sys.prefix, 'python.exe')
    else:
        python_bin = os.path.join(sys.prefix, 'bin', 'python')
    command = [
        python_bin,
        infer_tool,
        "-d=CPU", f"-m={model}", f"-r={out_file}"
    ]
    returncode, output = cmd_exec(command, env=get_openvino_environment(install_dir))
    return returncode, output