Summary report generator for layer and subgraph tests (#2707)

* Initial summary dumper implementation

* Handle TensorIterator body + add parser script

* Add support for XML report merging + report OP names with versions

* Remove debug device name change

* Fix windows building issue

* Add --disable_test_skips command line option

* Gtest failure with logging

* Change skipping logic and resolve linkage errors caused by extern

* Get graph body from Loop

* Fix disable_tests_skipping symbol redefinition

* Fix inline for currentTestIsDisabled

* Rollback get_body for Loop

* Handle cases with skip in test SetUp

* Report Loop and TI ops along with ops in subgraph body

* Resolve some PR comments

* Dummy commit to kick pre-commit validation

Co-authored-by: Efode, Irina <irina.efode@intel.com>
Mikhail Treskin 2020-11-12 12:33:23 +03:00 committed by GitHub
parent f4d399f471
commit 809c504d0a
10 changed files with 590 additions and 38 deletions
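Taken together, the files below form a small pipeline: every functional test binary registers a TestEnvironment that dumps per-operation pass rates into report.xml on teardown, and the new Python script merges one or more of those XMLs into an HTML coverage table. A hypothetical end-to-end run (binary and script names are illustrative; only the flags are defined by this change) might look like:

./cpuFuncTests --disable_tests_skipping
python3 summarize.py --xml report.xml --out .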

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "functional_test_utils/layer_test_utils.hpp"
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
bool print_custom_help = false;
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
}
if (std::string(argv[i]) == "--help") {
print_custom_help = true;
}
}
if (print_custom_help) {
std::cout << "Custom command line argument:" << std::endl;
std::cout << " --disable_tests_skipping" << std::endl;
std::cout << " Ignore tests skipping rules and run all the test" << std::endl;
std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl;
std::cout << std::endl;
}
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment);
auto retcode = RUN_ALL_TESTS();
return retcode;
}

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "functional_test_utils/layer_test_utils.hpp"
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
bool print_custom_help = false;
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
}
if (std::string(argv[i]) == "--help") {
print_custom_help = true;
}
}
if (print_custom_help) {
std::cout << "Custom command line argument:" << std::endl;
std::cout << " --disable_tests_skipping" << std::endl;
std::cout << " Ignore tests skipping rules and run all the test" << std::endl;
std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl;
std::cout << std::endl;
}
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment);
auto retcode = RUN_ALL_TESTS();
return retcode;
}

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "functional_test_utils/layer_test_utils.hpp"
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
bool print_custom_help = false;
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
}
if (std::string(argv[i]) == "--help") {
print_custom_help = true;
}
}
if (print_custom_help) {
std::cout << "Custom command line argument:" << std::endl;
std::cout << " --disable_tests_skipping" << std::endl;
std::cout << " Ignore tests skipping rules and run all the test" << std::endl;
std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl;
std::cout << std::endl;
}
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment);
auto retcode = RUN_ALL_TESTS();
return retcode;
}

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "functional_test_utils/layer_test_utils.hpp"
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
bool print_custom_help = false;
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
}
if (std::string(argv[i]) == "--help") {
print_custom_help = true;
}
}
if (print_custom_help) {
std::cout << "Custom command line argument:" << std::endl;
std::cout << " --disable_tests_skipping" << std::endl;
std::cout << " Ignore tests skipping rules and run all the test" << std::endl;
std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl;
std::cout << std::endl;
}
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment);
auto retcode = RUN_ALL_TESTS();
return retcode;
}

View File

@@ -1,25 +1,191 @@
// Copyright (C) 2019-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include <transformations/op_conversions/convert_batch_to_space.hpp>
#include <transformations/op_conversions/convert_space_to_batch.hpp>
#include <ngraph/opsets/opset.hpp>
#include <pugixml.hpp>
#include "layer_test_utils.hpp"
#include "plugin_config.hpp"
namespace LayerTestsUtils {
Summary *Summary::p_instance = nullptr;
SummaryDestroyer Summary::destroyer;
SummaryDestroyer::~SummaryDestroyer() {
delete p_instance;
}
void SummaryDestroyer::initialize(Summary *p) {
p_instance = p;
}
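// Lazy singleton: the first call allocates the instance and registers it with the
// static destroyer, which frees it at program exit.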
Summary &Summary::getInstance() {
if (!p_instance) {
p_instance = new Summary();
destroyer.initialize(p_instance);
}
return *p_instance;
}
void Summary::updateOPsStats(ngraph::NodeTypeInfo op, PassRate::Statuses status) {
auto it = opsStats.find(op);
if (it != opsStats.end()) {
auto &passrate = it->second;
switch (status) {
case PassRate::PASSED:
passrate.passed += 1;
break;
case PassRate::FAILED:
passrate.failed += 1;
break;
case PassRate::SKIPPED:
passrate.skipped += 1;
break;
}
} else {
switch (status) {
case PassRate::PASSED:
opsStats[op] = PassRate(1, 0, 0);
break;
case PassRate::FAILED:
opsStats[op] = PassRate(0, 1, 0);
break;
case PassRate::SKIPPED:
opsStats[op] = PassRate(0, 0, 1);
break;
}
}
}
void TestEnvironment::TearDown() {
std::vector<ngraph::OpSet> opsets;
opsets.push_back(ngraph::get_opset1());
opsets.push_back(ngraph::get_opset2());
opsets.push_back(ngraph::get_opset3());
opsets.push_back(ngraph::get_opset4());
opsets.push_back(ngraph::get_opset5());
std::set<ngraph::NodeTypeInfo> opsInfo;
for (const auto &opset : opsets) {
const auto &type_info_set = opset.get_type_info_set();
opsInfo.insert(type_info_set.begin(), type_info_set.end());
}
auto &s = Summary::getInstance();
auto stats = s.getOPsStats();
pugi::xml_document doc;
std::ifstream file;
file.open(reportFileName);
time_t rawtime;
struct tm *timeinfo;
char timeNow[80];
time(&rawtime);
// cpplint requires localtime_r instead, but it is not available in C++14
timeinfo = localtime(&rawtime); // NOLINT
strftime(timeNow, sizeof(timeNow), "%d-%m-%Y %H:%M:%S", timeinfo);
pugi::xml_node root;
if (file) {
doc.load_file(reportFileName.c_str());
root = doc.child("report");
// Ugly, but shorter than writing a predicate for find_attribute() to update the existing one
root.remove_attribute("timestamp");
root.append_attribute("timestamp").set_value(timeNow);
root.remove_child("ops_list");
root.child("results").remove_child(s.deviceName.c_str());
} else {
root = doc.append_child("report");
root.append_attribute("timestamp").set_value(timeNow);
root.append_child("results");
}
pugi::xml_node opsNode = root.append_child("ops_list");
for (const auto &op : opsInfo) {
std::string name = std::string(op.name) + "-" + std::to_string(op.version);
opsNode.append_child(name.c_str());
}
pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.append_child(s.deviceName.c_str());
for (const auto &it : stats) {
std::string name = std::string(it.first.name) + "-" + std::to_string(it.first.version);
pugi::xml_node entry = currentDeviceNode.append_child(name.c_str());
entry.append_attribute("passed").set_value(std::to_string(it.second.passed).c_str());
entry.append_attribute("failed").set_value(std::to_string(it.second.failed).c_str());
entry.append_attribute("skipped").set_value(std::to_string(it.second.skipped).c_str());
entry.append_attribute("passrate").set_value(std::to_string(it.second.getPassrate()).c_str());
}
bool result = doc.save_file(reportFileName.c_str());
if (!result) {
std::cout << "Failed to write report to " << reportFileName << "!" << std::endl;
}
}
LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) {
core = PluginCache::get().ie(targetDevice);
}
void LayerTestsCommon::Run() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
auto &s = Summary::getInstance();
s.setDeviceName(targetDevice);
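// Attribute the test status to every operation in the tested function, skipping
// Parameter/Constant/Result nodes and recursing into TensorIterator and Loop bodies.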
auto reportStatus = [this, &s](PassRate::Statuses status) {
if (function) {
for (const auto &op : function->get_ordered_ops()) {
if (ngraph::is_type<ngraph::op::Parameter>(op) ||
ngraph::is_type<ngraph::op::Constant>(op) ||
ngraph::is_type<ngraph::op::Result>(op)) {
continue;
} else if (ngraph::is_type<ngraph::op::TensorIterator>(op)) {
s.updateOPsStats(op->get_type_info(), status);
auto ti = ngraph::as_type_ptr<ngraph::op::TensorIterator>(op);
auto ti_body = ti->get_function();
for (const auto &ti_op : ti_body->get_ordered_ops()) {
s.updateOPsStats(ti_op->get_type_info(), status);
}
} else if (ngraph::is_type<ngraph::op::v5::Loop>(op)) {
s.updateOPsStats(op->get_type_info(), status);
auto loop = ngraph::as_type_ptr<ngraph::op::v5::Loop>(op);
auto loop_body = loop->get_function();
for (const auto &loop_op : loop_body->get_ordered_ops()) {
s.updateOPsStats(loop_op->get_type_info(), status);
}
} else {
s.updateOPsStats(op->get_type_info(), status);
}
}
}
};
if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
reportStatus(PassRate::Statuses::SKIPPED);
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
}
try {
LoadNetwork();
Infer();
Validate();
reportStatus(PassRate::Statuses::PASSED);
}
catch (const std::runtime_error &re) {
reportStatus(PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_(re.what());
} catch (const std::exception &ex) {
reportStatus(PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_(ex.what());
} catch (...) {
reportStatus(PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_("Unknown failure occurred.");
}
}
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
@@ -39,29 +205,37 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
const auto &size = actual->size();
switch (precision) {
case InferenceEngine::Precision::FP32:
Compare<float>(reinterpret_cast<const float *>(expectedBuffer),
reinterpret_cast<const float *>(actualBuffer), size, threshold);
break;
case InferenceEngine::Precision::I32:
Compare<int32_t>(reinterpret_cast<const int32_t *>(expectedBuffer),
reinterpret_cast<const int32_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I64:
Compare<int64_t>(reinterpret_cast<const int64_t *>(expectedBuffer),
reinterpret_cast<const int64_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I8:
Compare<int8_t>(reinterpret_cast<const int8_t *>(expectedBuffer),
reinterpret_cast<const int8_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::U16:
Compare<uint16_t>(reinterpret_cast<const uint16_t *>(expectedBuffer),
reinterpret_cast<const uint16_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I16:
Compare<int16_t>(reinterpret_cast<const int16_t *>(expectedBuffer),
reinterpret_cast<const int16_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::BOOL:
case InferenceEngine::Precision::U8:
Compare<uint8_t>(reinterpret_cast<const uint8_t *>(expectedBuffer),
reinterpret_cast<const uint8_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::U64:
Compare<uint64_t>(reinterpret_cast<const uint64_t *>(expectedBuffer),
reinterpret_cast<const uint64_t *>(actualBuffer), size, 0);
break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
@@ -69,7 +243,7 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
}
void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual) {
auto get_raw_buffer = [](const InferenceEngine::Blob::Ptr &blob) {
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
@@ -142,29 +316,32 @@ void LayerTestsCommon::Infer() {
std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
// nGraph interpreter does not support f16
// IE converts f16 to f32
ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(
function);
function->validate_nodes_and_infer_types();
auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
for (std::size_t i = 0; i < inputs.size(); ++i) {
const auto &input = inputs[i];
const auto &inputSize = input->byteSize();
auto &referenceInput = referenceInputs[i];
referenceInput.resize(inputSize);
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
const auto buffer = lockedMemory.as<const std::uint8_t *>();
std::copy(buffer, buffer + inputSize, referenceInput.data());
}
auto ieOutPrc = outPrc;
const auto &actualOutputs = GetOutputs();
std::vector<ngraph::element::Type_t> convertType(actualOutputs.size(),
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(ieOutPrc));
if (ieOutPrc == InferenceEngine::Precision::UNSPECIFIED) {
for (size_t i = 0; i < convertType.size(); i++) {
convertType[i] = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(
actualOutputs[i]->getTensorDesc().getPrecision());
}
}
@@ -208,24 +385,25 @@ std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
return outputs;
}
void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) {
for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
const auto &expected = expectedOutputs[outputIndex];
const auto &actual = actualOutputs[outputIndex];
Compare(expected, actual);
}
}
void LayerTestsCommon::Validate() {
auto expectedOutputs = CalculateRefs();
const auto &actualOutputs = GetOutputs();
if (expectedOutputs.empty()) {
return;
}
IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
<< "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
<< "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
Compare(expectedOutputs, actualOutputs);
}
@@ -238,7 +416,7 @@ std::shared_ptr<ngraph::Function> LayerTestsCommon::GetFunction() {
return function;
}
std::map<std::string, std::string> &LayerTestsCommon::GetConfiguration() {
return configuration;
}
} // namespace LayerTestsUtils
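For reference, the report produced by TestEnvironment::TearDown above has the following shape; this is a minimal sketch assuming a single CPU run and two opset entries (element names and counts are illustrative):

<report timestamp="12-11-2020 12:33:23">
    <ops_list>
        <Add-1/>
        <Loop-5/>
    </ops_list>
    <results>
        <CPU>
            <Add-1 passed="1" failed="0" skipped="0" passrate="100.000000"/>
        </CPU>
    </results>
</report>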

View File

@@ -27,9 +27,94 @@
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
namespace LayerTestsUtils {
class Summary;
class SummaryDestroyer {
private:
Summary *p_instance;
public:
~SummaryDestroyer();
void initialize(Summary *p);
};
class TestEnvironment;
class LayerTestsCommon;
struct PassRate {
enum Statuses {
PASSED,
FAILED,
SKIPPED
};
unsigned long passed = 0;
unsigned long failed = 0;
unsigned long skipped = 0;
PassRate() = default;
PassRate(unsigned long p, unsigned long f, unsigned long s) {
passed = p;
failed = f;
skipped = s;
}
float getPassrate() const {
if (passed == 0 && failed == 0) {
return 0.;
} else if (passed != 0 && failed == 0) {
return 100.;
} else {
// Floating-point division; integer division here would truncate the rate to 0.
return (static_cast<float>(passed) / (passed + failed)) * 100.f;
}
}
};
class Summary {
private:
static Summary *p_instance;
static SummaryDestroyer destroyer;
std::map<ngraph::NodeTypeInfo, PassRate> opsStats = {};
std::string deviceName;
protected:
Summary() = default;
Summary(const Summary &);
Summary &operator=(Summary &);
~Summary() = default;
void updateOPsStats(ngraph::NodeTypeInfo op, PassRate::Statuses status);
std::map<ngraph::NodeTypeInfo, PassRate> getOPsStats() { return opsStats; }
std::string getDeviceName() const { return deviceName; }
void setDeviceName(std::string device) { deviceName = device; }
friend class SummaryDestroyer;
friend class TestEnvironment;
friend class LayerTestsCommon;
public:
static Summary &getInstance();
};
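// Registered through ::testing::AddGlobalTestEnvironment in each test binary's main();
// gtest calls TearDown() once after all tests finish, which is when report.xml is written.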
class TestEnvironment : public ::testing::Environment {
public:
void TearDown() override;
private:
std::string reportFileName = "report.xml";
};
using TargetDevice = std::string;
typedef std::tuple<
@@ -128,6 +213,7 @@ protected:
virtual void Validate();
virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();
std::vector<InferenceEngine::Blob::Ptr> GetOutputs();
InferenceEngine::InferRequest inferRequest;

View File

@@ -0,0 +1,74 @@
import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
import argparse
import os
from datetime import datetime
parser = argparse.ArgumentParser()
xml_help = """
Paths to XML summary files from layer tests.
In case of entry intersection, results will
be merged based on timestamp: the entry from
the latest report is kept.
"""
out_help = "Path where to save the HTML report"
parser.add_argument("--xml", help=xml_help, nargs="*", required=True)
parser.add_argument("--out", help=out_help, default="")
args = parser.parse_args()
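# Merge several summary XMLs: ops_list becomes the union across reports; when a device
# entry appears in more than one report, the attributes from the report with the later
# timestamp are kept.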
def merge_xmls(xmls):
if len(xmls) == 1:
return xmls[0]
summary = ET.Element("report")
summary.set("timestamp", xmls[0].attrib["timestamp"])
results = ET.SubElement(summary, "results")
ops_list = ET.SubElement(summary, "ops_list")
for xml in xmls:
for op in xml.find("ops_list"):
if ops_list.find(op.tag) is None:
ET.SubElement(ops_list, op.tag)
for device in xml.find("results"):
device_results = results.find(device.tag)
if device_results is None:
results.append(device)
else:
for entry in device:
if device_results.find(entry.tag) is not None:
current_timestamp = datetime.strptime(xml.attrib["timestamp"], "%d-%m-%Y %H:%M:%S")
base_timestamp = datetime.strptime(summary.attrib["timestamp"], "%d-%m-%Y %H:%M:%S")
if current_timestamp > base_timestamp:
device_results.find(entry.tag).attrib = entry.attrib
else:
device_results.append(entry)
return summary
xmls = []
for xml in args.xml:
xmls.append(ET.parse(xml).getroot())
root = merge_xmls(xmls)
timestamp = root.attrib["timestamp"]
ops = []
for op in root.find("ops_list"):
ops.append(op.tag)
ordered_ops = sorted(ops)
results = {}
for device in root.find("results"):
results[device.tag] = {op.tag: op.attrib for op in device}
for op in results[device.tag]:
results[device.tag][op]["passrate"] = round(float(results[device.tag][op]["passrate"]), 1)
devices = results.keys()
file_loader = FileSystemLoader('template')
env = Environment(loader=file_loader)
template = env.get_template('report_template.html')
res = template.render(ordered_ops=ordered_ops, devices=devices, results=results, timestamp=timestamp)
with open(os.path.join(args.out, "report.html"), "w") as f:
f.write(res)
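A hypothetical invocation over two device reports (the script filename is illustrative; --xml and --out are the arguments defined above, and report_template.html must be available under ./template relative to the working directory, per the FileSystemLoader call):

python3 summarize.py --xml cpu_report.xml gpu_report.xml --out .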

View File

@@ -0,0 +1,68 @@
<!doctype html>
<html lang="en">
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<link rel="stylesheet" type="text/css" href="extensions/filter-control/bootstrap-table-filter-control.css">
<title>Report</title>
</head>
<body>
<script src="extensions/filter-control/bootstrap-table-filter-control.js"></script>
<!-- Optional JavaScript -->
<!-- jQuery first, then Popper.js, then Bootstrap JS -->
<!--
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
crossorigin="anonymous"></script>
-->
<hr class="my-4">
<h2>Operations coverage summary {{ timestamp }}</h2>
<hr class="my-4">
<table class="table table-hover" id="report">
<thead>
<tr>
<th scope="col">Operation</th>
{% for d in devices -%}
<th>{{ d }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for op in ordered_ops -%}
<tr>
<th scope="row">{{ op }}</th>
{% for d in devices -%}
{% if op in results[d] -%}
<td>
{{ results[d][op].passrate }}% (<span style="color: green">p:{{ results[d][op].passed }}</span>,
<span style="color: red">f:{{ results[d][op].failed }}</span>,s:{{ results[d][op].skipped }})
</td>
{% else -%}
<td class="table-warning">No tests</td>
{% endif -%}
{% endfor %}
</tr>
{% endfor -%}
<tr>
<th scope="row">Total: {{ordered_ops|length}}</th>
{% for d in devices -%}
<td>{{results[d]|length}}</td>
{% endfor %}
</tr>
</tbody>
</table>
</body>
</html>

View File

@@ -0,0 +1,25 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/skip_tests_config.hpp"
namespace FuncTestUtils {
namespace SkipTestsConfig {
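// The flag is defined in exactly one translation unit; the header only declares it
// extern, avoiding the disable_tests_skipping symbol-redefinition linker errors
// mentioned in the commit log.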
bool disable_tests_skipping = false;
bool currentTestIsDisabled() {
bool skip_test = false;
const auto fullName = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name()
+ std::string(".") + ::testing::UnitTest::GetInstance()->current_test_info()->name();
for (const auto &pattern : disabledTestPatterns()) {
std::regex re(pattern);
if (std::regex_match(fullName, re))
skip_test = true;
}
return skip_test && !disable_tests_skipping;
}
} // namespace SkipTestsConfig
} // namespace FuncTestUtils

View File

@@ -14,16 +14,9 @@ std::vector<std::string> disabledTestPatterns();
namespace FuncTestUtils {
namespace SkipTestsConfig {
extern bool disable_tests_skipping;
bool currentTestIsDisabled();
} // namespace SkipTestsConfig
} // namespace FuncTestUtils