Small changes in memcheck tests (#7128)
* add "" for windows in cmd line * requirements test * requirements test * add requests in requirements * add scipy in requirements * add executable python for prepare models * remove extra requirements * add window for supported in memcheck tests * add scipy in requirements * change check of product_type * add return for windows * change form of return arg in run_memcheck * change form of return arg in run_memcheck * remove windows check * change return format * remove empty line * change downloader_path
parent c39e6fcfd8
commit 8b0bec4e04
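Most of the changes below wrap `{executable}` in double quotes inside shell command strings, so that an interpreter installed under a path with spaces (typical on Windows, e.g. under `Program Files`) reaches the shell as a single argument. A minimal sketch of the failure mode and the fix; `run_cmd` is a hypothetical stand-in for the scripts' `run_in_subprocess` helper:

import subprocess
import sys

def run_cmd(cmd):
    # hypothetical stand-in: run one shell command string, as the stress scripts do
    subprocess.check_call(cmd, shell=True)

# Unquoted: a path such as C:\Program Files\Python39\python.exe is split at the space
# by the shell, so the call breaks on such installations.
unsafe = '{executable} -m venv .venv'.format(executable=sys.executable)

# Quoted: the interpreter path stays one argument on every platform.
safe = '"{executable}" -m venv .venv'.format(executable=sys.executable)

run_cmd(safe)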
@@ -43,7 +43,7 @@ class VirtualEnv:
         if sys.platform.startswith('linux') or sys.platform == 'darwin':
             self.venv_executable = self.venv_dir / "bin" / "python3"
         else:
-            self.venv_executable = self.venv_dir / "Scripts" / "python3.exe"
+            self.venv_executable = self.venv_dir / "Scripts" / "python.exe"
 
     def get_venv_executable(self):
         """Returns path to executable from virtual environment."""
@@ -55,7 +55,8 @@ class VirtualEnv:
 
     def create(self):
         """Creates virtual environment."""
-        cmd = '{executable} -m venv {venv}'.format(executable=sys.executable, venv=self.get_venv_dir())
+        cmd = '"{executable}" -m venv {venv}'.format(executable=sys.executable,
+                                                     venv=self.get_venv_dir())
         run_in_subprocess(cmd)
         self.is_created = True
 
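For illustration, what the reworked `create()` command expands to when the interpreter path contains spaces (the venv directory `./.stress_venv` comes from `main()` below; the interpreter path is made up):

venv_dir = './.stress_venv'
executable = r'C:\Program Files\Python39\python.exe'   # made-up Windows interpreter path
cmd = '"{executable}" -m venv {venv}'.format(executable=executable, venv=venv_dir)
print(cmd)   # "C:\Program Files\Python39\python.exe" -m venv ./.stress_venv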
@@ -63,10 +64,10 @@ class VirtualEnv:
         """Installs provided requirements. Creates virtual environment if it hasn't been created."""
         if not self.is_created:
             self.create()
-        cmd = '{executable} -m pip install --upgrade pip'.format(executable=self.get_venv_executable())
+        cmd = '"{executable}" -m pip install --upgrade pip'.format(executable=self.get_venv_executable())
         for req in requirements:
             # Don't install requirements via one `pip install` call to prevent "ERROR: Double requirement given"
-            cmd += ' && {executable} -m pip install -r {req}'.format(executable=self.get_venv_executable(), req=req)
+            cmd += ' && "{executable}" -m pip install -r {req}'.format(executable=self.get_venv_executable(), req=req)
         run_in_subprocess(cmd)
 
     def create_n_install_requirements(self, *requirements):
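As the in-code comment says, pip rejects a single invocation that receives the same package from two requirement files ("ERROR: Double requirement given"), so the files are installed one `pip install -r` per call, chained with `&&`. A short sketch of how the final command string comes together, with placeholder paths:

venv_python = './.stress_venv/bin/python3'   # placeholder; Scripts\python.exe on Windows
requirements = ['requirements.txt', 'requirements-caffe2.in']   # placeholder file names

cmd = '"{executable}" -m pip install --upgrade pip'.format(executable=venv_python)
for req in requirements:
    # one `pip install -r` per file; && stops the chain at the first failure
    cmd += ' && "{executable}" -m pip install -r {req}'.format(executable=venv_python, req=req)
print(cmd)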
@@ -146,7 +147,6 @@ def main():
     Venv = VirtualEnv("./.stress_venv")
     requirements = [
         args.mo_tool.parent / "requirements.txt",
-        args.mo_tool.parent / "requirements_dev.txt",
         omz_path / "tools" / "model_tools" / "requirements.in",
         omz_path / "tools" / "model_tools" / "requirements-caffe2.in",
         omz_path / "tools" / "model_tools" / "requirements-pytorch.in"
@@ -192,22 +192,22 @@ def main():
                 args.omz_irs_out_dir / model_rec.attrib["subdirectory"] / precision / (model_rec.attrib["name"] + ".xml"))
 
             # prepare models
 
             downloader_path = omz_path / "tools" / "model_tools" / "downloader.py"
-            cmd = '{downloader_path} --name {model_name}' \
+            cmd = '"{executable}" {downloader_path} --name {model_name}' \
                   ' --precisions={precision}' \
                   ' --num_attempts {num_attempts}' \
                   ' --output_dir {models_dir}' \
-                  ' --cache_dir {cache_dir}'.format(downloader_path=downloader_path, precision=precision,
-                                                    models_dir=args.omz_models_out_dir,
-                                                    num_attempts=OMZ_NUM_ATTEMPTS, model_name=model_name,
-                                                    cache_dir=args.omz_cache_dir)
+                  ' --cache_dir {cache_dir}'.format(executable=python_executable, downloader_path=downloader_path,
+                                                    model_name=model_name,
+                                                    precision=precision, num_attempts=OMZ_NUM_ATTEMPTS,
+                                                    models_dir=args.omz_models_out_dir, cache_dir=args.omz_cache_dir)
 
             run_in_subprocess(cmd, check_call=not args.skip_omz_errors)
 
             # convert models to IRs
             converter_path = omz_path / "tools" / "model_tools" / "converter.py"
             # NOTE: remove --precisions if both precisions (FP32 & FP16) required
-            cmd = '{executable} {converter_path} --name {model_name}' \
+            cmd = '"{executable}" {converter_path} --name {model_name}' \
                   ' -p "{executable}"' \
                   ' --precisions={precision}' \
                   ' --output_dir {irs_dir}' \
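The downloader and converter are now launched through an explicit interpreter (`python_executable` in the hunk above) rather than as a bare `.py` path, whose resolution depends on file associations and is not reliable on Windows. A rough sketch of the resulting downloader call, with made-up paths and model name:

python_executable = './.stress_venv/bin/python3'                      # made-up interpreter path
downloader_path = 'open_model_zoo/tools/model_tools/downloader.py'    # made-up checkout location

cmd = '"{executable}" {downloader_path} --name {model_name}' \
      ' --precisions={precision}' \
      ' --num_attempts {num_attempts}' \
      ' --output_dir {models_dir}' \
      ' --cache_dir {cache_dir}'.format(executable=python_executable, downloader_path=downloader_path,
                                        model_name='alexnet', precision='FP32',
                                        num_attempts=1, models_dir='./models', cache_dir='./cache')
print(cmd)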
@@ -26,10 +26,10 @@ import yaml
 from pymongo import MongoClient
 
 # Database arguments
 DATABASE = 'memcheck'   # database name for memcheck results
 DB_COLLECTIONS = ["commit", "nightly", "weekly"]
 
 PRODUCT_NAME = 'dldt'   # product name from build manifest
 RE_GTEST_MODEL_XML = re.compile(r'<model[^>]*>')
 RE_GTEST_CUR_MEASURE = re.compile(r'\[\s*MEASURE\s*\]')
 RE_GTEST_REF_MEASURE = re.compile(
@@ -109,18 +109,18 @@ def parse_memcheck_log(log_path):
     log_lines = log.splitlines()
     for index, line in enumerate(log_lines):
         if RE_GTEST_REF_MEASURE.search(line):
-            heading = [name.lower() for name in log_lines[index+1]
+            heading = [name.lower() for name in log_lines[index + 1]
                        [len(GTEST_INFO):].split()]
-            values = [int(val) for val in log_lines[index+2]
+            values = [int(val) for val in log_lines[index + 2]
                       [len(GTEST_INFO):].split()]
             ref_metrics = dict(zip(heading, values))
     for index in reversed(range(len(log_lines))):
         if RE_GTEST_CUR_MEASURE.search(log_lines[index]):
             test_name = log_lines[index].split()[-1]
-            heading = [name.lower() for name in log_lines[index+1]
+            heading = [name.lower() for name in log_lines[index + 1]
                        [len(GTEST_INFO):].split()]
-            values = [int(val) for val in log_lines[index+2]
+            values = [int(val) for val in log_lines[index + 2]
                       [len(GTEST_INFO):].split()]
             entry = SimpleNamespace(
                 metrics=dict(zip(heading, values)),
                 test_name=test_name,
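For context: the parser pairs each gtest measure line with the two log lines that follow it, where the first carries the metric names and the second their values, both prefixed by the `GTEST_INFO` marker. A standalone sketch with an invented log snippet (the prefix value and metric names are assumptions, not copied from real test output):

GTEST_INFO = '[ INFO ] '   # assumed prefix; the real constant is defined in the upload script

log_lines = [
    '[ MEASURE ] MemCheckTests.some_test',   # invented measure line
    '[ INFO ] VMHWM VMRSS',                  # metric names
    '[ INFO ] 123456 98765',                 # metric values
]

index = 0   # position of the MEASURE line
heading = [name.lower() for name in log_lines[index + 1][len(GTEST_INFO):].split()]
values = [int(val) for val in log_lines[index + 2][len(GTEST_INFO):].split()]
print(dict(zip(heading, values)))   # {'vmhwm': 123456, 'vmrss': 98765}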
@@ -191,16 +191,17 @@ TIMELINE_SIMILARITY = ('test_name', 'model', 'device', 'target_branch')
 def query_timeline(records, db_url, db_collection, max_items=20, similarity=TIMELINE_SIMILARITY):
     """ Query database for similar memcheck items committed previously
     """
 
     def timeline_key(item):
         """ Defines order for timeline report entries
         """
         if len(item['metrics']['vmhwm']) <= 1:
             return 1
         order = item['metrics']['vmhwm'][-1] - item['metrics']['vmhwm'][-2] + \
                 item['metrics']['vmrss'][-1] - item['metrics']['vmrss'][-2]
         if not item['status']:
             # ensure failed cases are always on top
-            order += sys.maxsize/2
+            order += sys.maxsize / 2
         return order
 
     client = MongoClient(db_url)
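`timeline_key` above orders report entries by how much `vmhwm` plus `vmrss` grew between the two most recent runs, and failed entries get half of `sys.maxsize` added so they sort ahead of everything else. A minimal sketch with fabricated records, assuming the report lists entries in descending key order:

import sys

def timeline_key(item):
    # same ordering rule as in the hunk above
    if len(item['metrics']['vmhwm']) <= 1:
        return 1
    order = item['metrics']['vmhwm'][-1] - item['metrics']['vmhwm'][-2] + \
            item['metrics']['vmrss'][-1] - item['metrics']['vmrss'][-2]
    if not item['status']:
        # ensure failed cases are always on top
        order += sys.maxsize / 2
    return order

records = [   # fabricated entries; the last element of each metric list is the newest run
    {'metrics': {'vmhwm': [100, 150], 'vmrss': [80, 90]}, 'status': True},
    {'metrics': {'vmhwm': [100, 100], 'vmrss': [80, 80]}, 'status': False},
]
worst_first = sorted(records, key=timeline_key, reverse=True)
print(worst_first[0]['status'])   # False -- the failed record sorts first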
@@ -3,4 +3,5 @@ Jinja2>=2.11.2
 PyYAML>=5.4.1
 fastjsonschema~=2.15.1
 pandas>=1.1.5
 h5py>=3.1.0
+scipy~=1.5.4