Add conditional compilation tests (#3996)

This commit is contained in:
Andrey Somsikov
2021-01-28 13:27:07 +03:00
committed by GitHub
parent 78585c2fe8
commit 81da815dd8
5 changed files with 200 additions and 0 deletions

View File

@@ -0,0 +1,100 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=line-too-long
""" Pytest configuration for compilation tests.
Sample usage:
python3 -m pytest --artifacts ./compiled --models_root=<path to openvinotoolkit/testdata repository> \
--sea_runtool=./IntelSEAPI/runtool/sea_runtool.py \
--benchmark_app=./bin/benchmark_app test_collect.py
"""
import sys
from inspect import getsourcefile
from pathlib import Path
import pytest
# Make ../lib importable (e.g. proc_utils, which the test modules import).
# getsourcefile(lambda: 0) yields this conftest's own file path regardless of
# the current working directory; strict=True fails fast if ../lib is missing.
sys.path.insert(
    0, str((Path(getsourcefile(lambda: 0)) / ".." / ".." / "lib").resolve(strict=True))
)
# Using models from https://github.com/openvinotoolkit/testdata
# $find models -wholename "*.xml"
# Each entry is a dict with:
#   "path"  - model XML path relative to the --models_root option
#   "marks" - (optional) extra pytest marks forwarded by pytest_generate_tests
TESTS = [
    {"path": "models/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_i8.xml"},
    {"path": "models/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224_i8.xml"},
    {"path": "models/inception_v3/inception_v3_i8.xml"},
    {"path": "models/resnet_v1_50/resnet_v1_50_i8.xml"},
    {"path": "models/test_model/test_model_fp16.xml"},
    {"path": "models/test_model/test_model_fp32.xml"},
]
def pytest_addoption(parser):
    """Register the command-line options required by the compilation tests.

    Every option is mandatory and its value is converted to pathlib.Path.
    """
    add = parser.addoption
    add(
        "--models_root", required=True, type=Path, help="Path to models root directory"
    )
    add("--sea_runtool", required=True, type=Path, help="Path to sea_runtool.py")
    add(
        "--benchmark_app",
        required=True,
        type=Path,
        help="Path to the benchmark_app tool",
    )
    add(
        "-A",
        "--artifacts",
        required=True,
        type=Path,
        help="Artifacts directory where tests write output or read input",
    )
def pytest_generate_tests(metafunc):
    """Parametrize the ``model`` fixture with one param per TESTS entry.

    Each entry contributes a ``pytest.param`` whose value is the model path
    (as ``pathlib.Path``); an entry's optional "marks" are forwarded as the
    param's marks. The raw path string is used as the test id so test names
    stay readable in reports.
    """
    params = []
    ids = []
    for test in TESTS:
        path = test["path"]
        # pytest.param defaults to marks=(), so an absent "marks" key is a no-op.
        params.append(pytest.param(Path(path), marks=test.get("marks", ())))
        ids.append(path)  # append, not `ids = ids + [...]` (avoids O(n^2) rebuild)
    metafunc.parametrize("model", params, ids=ids)
@pytest.fixture(scope="session")
def sea_runtool(request):
    """Session-wide path to sea_runtool.py, taken from the --sea_runtool option."""
    value = request.config.getoption("sea_runtool")
    return value
@pytest.fixture(scope="session")
def benchmark_app(request):
    """Session-wide path to the benchmark_app tool, from the --benchmark_app option."""
    value = request.config.getoption("benchmark_app")
    return value
@pytest.fixture(scope="session")
def models_root(request):
    """Session-wide models root directory, from the --models_root option."""
    value = request.config.getoption("models_root")
    return value
@pytest.fixture(scope="session")
def artifacts(request):
    """Session-wide artifacts directory, from the -A/--artifacts option."""
    value = request.config.getoption("artifacts")
    return value

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test conditional compilation statistics collection.
"""
import glob
import os
from proc_utils import cmd_exec # pylint: disable=import-error
def test_cc_collect(model, sea_runtool, benchmark_app, models_root, artifacts):
    """Collect conditional compilation statistics for one model.

    Runs benchmark_app under sea_runtool in stat-collection mode and verifies
    that exactly one per-PID CSV statistics file is produced under artifacts.
    """
    out = artifacts / model.parent / model.stem
    # Remove stale statistics left over from previous runs so the final
    # exactly-one-file check is meaningful.
    for stale in glob.glob(f"{out}.pid*.csv"):
        os.remove(stale)
    command = [
        "python",
        str(sea_runtool),
        f"-o={out}",
        "-f=stat",
        "!",
        str(benchmark_app),
        "-d=CPU",
        f"-m={models_root / model}",
        "-niter=1",
        "-nireq=1",
    ]
    returncode, _ = cmd_exec(command)
    assert returncode == 0, f"Command exited with non-zero status {returncode}"
    produced = glob.glob(f"{out}.pid*.csv")
    assert len(produced) == 1, f'Multiple or none "{out}.pid*.csv" files'

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env python3
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
""" Test inference with conditional compiled binaries.
"""
from proc_utils import cmd_exec # pylint: disable=import-error
def test_infer(model, models_root, benchmark_app):
    """Run one benchmark_app inference on the given model and expect success."""
    command = [
        str(benchmark_app),
        "-d=CPU",
        f"-m={models_root / model}",
        "-niter=1",
        "-nireq=1",
    ]
    returncode, _ = cmd_exec(command)
    assert returncode == 0, f"Command exited with non-zero status {returncode}"