[IE TESTS][Conformance] Add conformance tests runner (#3694)

* [IE TESTS][Conformance] Add conformance test target

* Handle crashes. Added crashed tests to the report

* Fix report and flags

* Apply comments

* Apply comment

* Remove flag, fix

* copyrights

* Generate in

* Remove extra code

* fix mac

* CI fixes

* fix win

* Fixes

* Input fixes

* fix flag

* Add generate in

* Fix compare

* Fixes CI
Irina Efode 2021-03-15 12:20:53 +03:00 committed by GitHub
parent 4f43598da4
commit 8e080d4504
30 changed files with 1494 additions and 49 deletions

@@ -20,6 +20,5 @@ if (ENABLE_MYRIAD)
add_subdirectory(myriad)
endif()
add_subdirectory(conformance)

@@ -1,7 +1,8 @@
# Copyright (C) 2020 Intel Corporation
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
add_subdirectory(test_runner)
add_subdirectory(subgraphs_dumper)
add_subdirectory(subgraphs_dumper/tests)

@@ -0,0 +1,33 @@
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME conformanceTests)
list(APPEND EXPORT_DEPENDENCIES
gflags
funcSharedTests
)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include"
ADDITIONAL_SOURCE_DIRS
${CMAKE_CURRENT_SOURCE_DIR}/src
ADD_CPPLINT
INCLUDES
PUBLIC
"${CMAKE_CURRENT_SOURCE_DIR}/include"
LINK_LIBRARIES
PUBLIC
${EXPORT_DEPENDENCIES}
DEPENDENCIES
${EXPORT_DEPENDENCIES}
LABELS
CONFORMANCE
)
ie_faster_build(${TARGET_NAME}
PCH PRIVATE "src/precomp.hpp"
)

@@ -0,0 +1,46 @@
# Conformance test runner
## Description
The conformance suite is a set of tests whose parameters are independent of plugin specifics and limitations. It contains:
* `ReadIR`. Reads IRs recursively from the specified folders, runs inference on them, and compares the results against references.
## How to build
Run the following commands in the build directory:
1. Generate CMake project:
```
cmake -DENABLE_FUNCTIONAL_TESTS=ON ..
```
2. Build the target:
```
make conformanceTests
```
## How to run
The target accepts the following command-line arguments (see the example invocation below):
* `-h` prints the command-line options with descriptions.
* `--device` specifies the target device.
* `--input_folders` specifies the folders with IRs to run. The separator is `,`.
* `--disable_test_config` ignores all test skipping rules (tests with the `DISABLED_` prefix are still skipped).
* `--extend_report` adds the results of this run to the existing report instead of overwriting the device results.
* All `gtest` command-line parameters.
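A direct invocation might look like this (the binary location and IR paths below are illustrative):
```
./conformanceTests --device=CPU --input_folders=/path/to/ir_dump_1,/path/to/ir_dump_2 --extend_report
```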
Execution produces a `report.xml` file with per-operation statistics for each device: the pass rate and the numbers of passed,
failed, skipped, and crashed tests.
> **NOTE**:
>
> Running `conformanceTests` under the GTest parallel tool helps to report crashed tests and collect correct statistics
> after unexpected crashes.
>
> Example usage:
> ```
> python3 gtest_parallel.py /opt/repo/openvino/bin/intel64/Debug/conformanceTests -d . --gtest_filter=*1613473581844763495*:*roi_align*:*PSROIPooling*:*Add*:*BinaryConv* -- --input_folders=/opt/repo/roi_align,/opt/repo/omz/out --device=CPU
> ```
> All arguments after the `--` symbol are forwarded to the `conformanceTests` target.
## How to build operation coverage report
Run [the script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate `html` report.
Example usage:
```
python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/
```
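For reference, here is a minimal sketch of the `report.xml` structure, inferred from the `saveReport()` logic in this PR (the device, operation names, and counts are illustrative):
```
<report timestamp="...">
    <ops_list>
        <Add-1/>
    </ops_list>
    <results>
        <CPU>
            <Add-1 passed="10" failed="1" skipped="0" crashed="2" passrate="76.9"/>
        </CPU>
    </results>
</report>
```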

@@ -0,0 +1,10 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <vector>
namespace ConformanceTests {
extern const char* targetDevice;
extern std::vector<std::string> IRFolderPaths;
} // namespace ConformanceTests

@@ -0,0 +1,39 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gflags/gflags.h>
#include <iostream>
static const char help_message[] = "Print a usage message.";
static const char disable_test_config_message[] = "Optional. Ignore test skipping rules and run all the tests (except those which are skipped with the DISABLED "
"prefix)";
static const char extend_report_config_message[] = "Optional. Extend operation coverage report without overwriting the device results.";
static const char target_device_message[] = "Required. Specify the target device for Conformance Test Suite "
"(the list of available devices is shown below). Default value is CPU. "
"Use \"-d HETERO:<comma-separated_devices_list>\" format to specify HETERO plugin. "
"The application looks for a suitable plugin for the specified device.";
static const char input_folders_message[] = "Required. Paths to the input folders with IRs. Delimiter is `,` symbol.";
DEFINE_bool(h, false, help_message);
DEFINE_string(device, "CPU", target_device_message);
DEFINE_string(input_folders, ".", input_folders_message);
DEFINE_bool(disable_test_config, true, disable_test_config_message);
DEFINE_bool(extend_report, true, extend_report_config_message);
/**
* @brief This function shows a help message
*/
static void showUsage() {
std::cout << std::endl;
std::cout << "Conformance tests [OPTION]" << std::endl;
std::cout << "Options:" << std::endl;
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
std::cout << " --disable_test_config " << disable_test_config_message << std::endl;
std::cout << " --extend_report " << extend_report_config_message << std::endl;
std::cout << " --device " << target_device_message << std::endl;
std::cout << " --input_folders \"<paths>\" " << input_folders_message << std::endl;
}

@@ -0,0 +1,8 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/core_config.hpp"
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
}

@@ -0,0 +1,57 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "gtest/gtest.h"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "gflag_config.hpp"
#include "conformance.hpp"
static std::vector<std::string> splitStringByDelimiter(std::string str, const std::string& delimiter = ",") {
size_t delimiterPos;
std::vector<std::string> irPaths;
while ((delimiterPos = str.find(delimiter)) != std::string::npos) {
irPaths.push_back(str.substr(0, delimiterPos));
str = str.substr(delimiterPos + 1);
}
irPaths.push_back(str);
return irPaths;
}
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
LayerTestsUtils::extendReport = true;
// Workaround for Gtest + Gflag
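// (gflags fails on unknown flags, so the gtest_* arguments are filtered out before gflags parsing;
// the untouched argv is passed to InitGoogleTest below.)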
std::vector<char*> argv_gflags_vec;
int argc_gflags = 0;
for (int i = 0; i < argc; ++i) {
std::string arg(argv[i]);
if (arg.find("gtest") == std::string::npos) {
argv_gflags_vec.emplace_back(argv[i]);
argc_gflags++;
}
}
char** argv_gflags = argv_gflags_vec.data();
// ---------------------------Parsing and validation of input args--------------------------------------
gflags::ParseCommandLineNonHelpFlags(&argc_gflags, &argv_gflags, true);
if (FLAGS_h) {
showUsage();
return 0;
}
if (!FLAGS_disable_test_config) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
}
if (!FLAGS_extend_report) {
LayerTestsUtils::extendReport = false;
}
// ---------------------------Initialization of Gtest env -----------------------------------------------
ConformanceTests::targetDevice = FLAGS_device.c_str();
ConformanceTests::IRFolderPaths = splitStringByDelimiter(FLAGS_input_folders);
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment);
return RUN_ALL_TESTS();
}

@@ -0,0 +1,36 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gtest/gtest.h>
#include <ngraph/ngraph.hpp>
#include <ngraph/ops.hpp>
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/subgraph_builders.hpp"
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <ostream>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <unordered_set>
#include <utility>
#include <vector>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>

@@ -0,0 +1,23 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/file_utils.hpp"
#include "read_ir/read_ir.hpp"
namespace ConformanceTests {
using namespace LayerTestsDefinitions;
const char* targetDevice = "";
std::vector<std::string> IRFolderPaths = {};
namespace {
INSTANTIATE_TEST_CASE_P(conformance,
ReadIRTest,
::testing::Combine(
::testing::ValuesIn(CommonTestUtils::getFileListByPatternRecursive(IRFolderPaths, std::regex(R"(.*\.xml)"))),
::testing::Values(targetDevice)),
ReadIRTest::getTestCaseName);
} // namespace
} // namespace ConformanceTests

@@ -0,0 +1,12 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <string>
#include "functional_test_utils/skip_tests_config.hpp"
std::vector<std::string> disabledTestPatterns() {
return {};
}

@@ -0,0 +1,11 @@
// Copyright (C) 2019-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/read_ir/read_ir.hpp"
namespace LayerTestsDefinitions {
TEST_P(ReadIRTest, ReadIR) {
Run();
}
} // namespace LayerTestsDefinitions

@@ -8,12 +8,14 @@
int main(int argc, char* argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
LayerTestsUtils::extendReport = false;
bool print_custom_help = false;
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
}
if (std::string(argv[i]) == "--help") {
} else if (std::string(argv[i]) == "--extend_report") {
LayerTestsUtils::extendReport = true;
} else if (std::string(argv[i]) == "--help") {
print_custom_help = true;
}
}
@@ -22,6 +24,8 @@ int main(int argc, char* argv[]) {
std::cout << " --disable_tests_skipping" << std::endl;
std::cout << " Ignore tests skipping rules and run all the test" << std::endl;
std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl;
std::cout << " --extend_report" << std::endl;
std::cout << " Extend operation coverage report without overwriting the device results" << std::endl;
std::cout << std::endl;
}
::testing::InitGoogleTest(&argc, argv);

@@ -31,6 +31,8 @@
namespace LayerTestsUtils {
extern bool extendReport;
// filename length limitation due to Windows constraints (max 256 characters)
constexpr std::size_t maxFileNameLength = 140;
@@ -53,25 +55,28 @@ struct PassRate {
enum Statuses {
PASSED,
FAILED,
SKIPPED
SKIPPED,
CRASHED
};
unsigned long passed = 0;
unsigned long failed = 0;
unsigned long skipped = 0;
unsigned long crashed = 0;
PassRate() = default;
PassRate(unsigned long p, unsigned long f, unsigned long s) {
PassRate(unsigned long p, unsigned long f, unsigned long s, unsigned long c) {
passed = p;
failed = f;
skipped = s;
crashed = c;
}
float getPassrate() const {
if (passed + failed == 0) {
if (passed + failed + crashed == 0) {
return 0.f;
} else {
return passed * 100.f / (passed + failed + skipped);
return passed * 100.f / (passed + failed + skipped + crashed);
}
}
};
@@ -96,6 +101,8 @@ protected:
std::map<ngraph::NodeTypeInfo, PassRate> getOPsStats() { return opsStats; }
std::map<std::string, PassRate> getOpStatisticFromReport();
std::string getDeviceName() const { return deviceName; }
void setDeviceName(std::string device) { deviceName = device; }
@@ -113,9 +120,7 @@ public:
class TestEnvironment : public ::testing::Environment {
public:
void TearDown() override;
private:
std::string reportFileName = "report.xml";
static void saveReport();
};
using TargetDevice = std::string;
@@ -140,6 +145,14 @@ public:
virtual void Serialize();
static void Compare(const std::vector<std::vector<std::uint8_t>> &expected,
const std::vector<InferenceEngine::Blob::Ptr> &actual,
float threshold);
static void Compare(const std::vector<std::uint8_t> &expected,
const InferenceEngine::Blob::Ptr &actual,
float threshold);
virtual void Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs);
@@ -155,9 +168,6 @@ public:
std::string getRuntimePrecision(const std::string& layerName);
protected:
LayerTestsCommon();
template<class T>
static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
for (std::size_t i = 0; i < size; ++i) {
@@ -177,6 +187,9 @@ protected:
}
}
protected:
LayerTestsCommon();
RefMode GetRefMode() {
return refMode;
}
@@ -211,7 +224,7 @@ protected:
virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();
std::vector<InferenceEngine::Blob::Ptr> GetOutputs();
virtual std::vector<InferenceEngine::Blob::Ptr> GetOutputs();
InferenceEngine::InferRequest inferRequest;

@@ -0,0 +1,19 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_core.hpp>
#include "ngraph/node.hpp"
namespace LayerTestsDefinitions {
using CompareMap = std::map<ngraph::NodeTypeInfo, std::function<void(
const std::shared_ptr<ngraph::Node> node,
const std::vector<std::vector<std::uint8_t>>& expected,
const std::vector<InferenceEngine::Blob::Ptr>& actual,
float threshold)>>;
CompareMap getCompareMap();
} // namespace LayerTestsDefinitions

@@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_core.hpp>
#include "ngraph/node.hpp"
namespace LayerTestsDefinitions {
using InputsMap = std::map<ngraph::NodeTypeInfo, std::function<InferenceEngine::Blob::Ptr(
const std::shared_ptr<ngraph::Node> node,
const InferenceEngine::InputInfo& info,
size_t port)>>;
InputsMap getInputMap();
} // namespace LayerTestsDefinitions

@@ -0,0 +1,27 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <shared_test_classes/single_layer/proposal.hpp>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/single_layer/psroi_pooling.hpp"
#include "shared_test_classes/single_layer/roi_pooling.hpp"
#include "shared_test_classes/single_layer/roi_align.hpp"
namespace LayerTestsDefinitions {
class ReadIRTest : public testing::WithParamInterface<std::tuple<std::string, std::string>>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<std::tuple<std::string, std::string>> &obj);
protected:
void SetUp() override;
void GenerateInputs() override;
void Compare(const std::vector<std::vector<std::uint8_t>> &expected,
const std::vector<InferenceEngine::Blob::Ptr> &actual) override;
std::vector<InferenceEngine::Blob::Ptr> GetOutputs() override;
private:
std::string pathToModel;
};
} // namespace LayerTestsDefinitions

@@ -33,6 +33,9 @@ class PSROIPoolingLayerTest : public testing::WithParamInterface<psroiParams>,
public:
static std::string getTestCaseName(testing::TestParamInfo<psroiParams> obj);
void GenerateInputs() override;
static void fillROITensor(float* buffer, int numROIs, int batchSize,
int height, int width, int groupSize,
float spatialScale, int spatialBinsX, int spatialBinsY, const std::string& mode);
protected:
void SetUp() override;

@@ -23,6 +23,9 @@ class ROIAlignLayerTest : public testing::WithParamInterface<roialignParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<roialignParams> obj);
static void fillCoordTensor(std::vector<float>& coords, int height, int width,
float spatialScale, int pooledRatio, int pooledH, int pooledW);
static void fillIdxTensor(std::vector<int>& idx, int batchSize);
protected:
void SetUp() override;

@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include <signal.h>
#include <transformations/serialize.hpp>
#include <transformations/op_conversions/convert_batch_to_space.hpp>
@@ -16,6 +17,9 @@
namespace LayerTestsUtils {
bool isReported = false;
bool extendReport = true;
Summary *Summary::p_instance = nullptr;
SummaryDestroyer Summary::destroyer;
@@ -41,31 +45,67 @@ void Summary::updateOPsStats(ngraph::NodeTypeInfo op, PassRate::Statuses status)
auto &passrate = it->second;
switch (status) {
case PassRate::PASSED:
passrate.passed += 1;
passrate.passed++;
passrate.crashed--;
break;
case PassRate::FAILED:
passrate.failed += 1;
passrate.failed++;
passrate.crashed--;
break;
case PassRate::SKIPPED:
passrate.skipped += 1;
passrate.skipped++;
break;
case PassRate::CRASHED:
passrate.crashed++;
break;
}
} else {
switch (status) {
case PassRate::PASSED:
opsStats[op] = PassRate(1, 0, 0);
opsStats[op] = PassRate(1, 0, 0, 0);
break;
case PassRate::FAILED:
opsStats[op] = PassRate(0, 1, 0);
opsStats[op] = PassRate(0, 1, 0, 0);
break;
case PassRate::SKIPPED:
opsStats[op] = PassRate(0, 0, 1);
opsStats[op] = PassRate(0, 0, 1, 0);
break;
case PassRate::CRASHED:
opsStats[op] = PassRate(0, 0, 0, 1);
break;
}
}
}
void TestEnvironment::TearDown() {
std::map<std::string, PassRate> Summary::getOpStatisticFromReport() {
pugi::xml_document doc;
std::ifstream file;
file.open(CommonTestUtils::REPORT_FILENAME);
pugi::xml_node root;
doc.load_file(CommonTestUtils::REPORT_FILENAME);
root = doc.child("report");
pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.child(deviceName.c_str());
std::map<std::string, PassRate> oldOpsStat;
for (auto &child : currentDeviceNode.children()) {
std::string entry = child.name();
auto p = std::stoi(child.attribute("passed").value());
auto f = std::stoi(child.attribute("failed").value());
auto s = std::stoi(child.attribute("skipped").value());
auto c = std::stoi(child.attribute("crashed").value());
PassRate obj(p, f, s, c);
oldOpsStat.insert({entry, obj});
}
return oldOpsStat;
}
void TestEnvironment::saveReport() {
if (isReported) {
return;
}
std::vector<ngraph::OpSet> opsets;
opsets.push_back(ngraph::get_opset1());
opsets.push_back(ngraph::get_opset2());
@@ -80,13 +120,13 @@ void TestEnvironment::TearDown() {
opsInfo.insert(type_info_set.begin(), type_info_set.end());
}
auto &s = Summary::getInstance();
auto stats = s.getOPsStats();
auto &summary = Summary::getInstance();
auto stats = summary.getOPsStats();
pugi::xml_document doc;
std::ifstream file;
file.open(reportFileName);
file.open(CommonTestUtils::REPORT_FILENAME);
time_t rawtime;
struct tm *timeinfo;
@@ -100,14 +140,14 @@ void TestEnvironment::TearDown() {
pugi::xml_node root;
if (file) {
doc.load_file(reportFileName.c_str());
doc.load_file(CommonTestUtils::REPORT_FILENAME);
root = doc.child("report");
// Ugly, but shorter than writing a predicate for find_attribute() to update the existing one
root.remove_attribute("timestamp");
root.append_attribute("timestamp").set_value(timeNow);
root.remove_child("ops_list");
root.child("results").remove_child(s.deviceName.c_str());
root.child("results").remove_child(summary.deviceName.c_str());
} else {
root = doc.append_child("report");
root.append_attribute("timestamp").set_value(timeNow);
@@ -122,19 +162,57 @@
}
pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.append_child(s.deviceName.c_str());
pugi::xml_node currentDeviceNode = resultsNode.append_child(summary.deviceName.c_str());
std::unordered_set<std::string> opList;
for (const auto &it : stats) {
std::string name = std::string(it.first.name) + "-" + std::to_string(it.first.version);
opList.insert(name);
pugi::xml_node entry = currentDeviceNode.append_child(name.c_str());
entry.append_attribute("passed").set_value(it.second.passed);
entry.append_attribute("failed").set_value(it.second.failed);
entry.append_attribute("skipped").set_value(it.second.skipped);
entry.append_attribute("crashed").set_value(it.second.crashed);
entry.append_attribute("passrate").set_value(it.second.getPassrate());
}
bool result = doc.save_file(reportFileName.c_str());
if (!result) {
std::cout << "Failed to write report to " << reportFileName << "!" << std::endl;
if (extendReport && file) {
auto opStatsFromReport = summary.getOpStatisticFromReport();
for (auto& item : opStatsFromReport) {
pugi::xml_node entry;
if (opList.find(item.first) == opList.end()) {
entry = currentDeviceNode.append_child(item.first.c_str());
entry.append_attribute("passed").set_value(item.second.passed);
entry.append_attribute("failed").set_value(item.second.failed);
entry.append_attribute("skipped").set_value(item.second.skipped);
entry.append_attribute("crashed").set_value(item.second.crashed);
entry.append_attribute("passrate").set_value(item.second.getPassrate());
} else {
entry = currentDeviceNode.child(item.first.c_str());
auto p = std::stoi(entry.attribute("passed").value()) + item.second.passed;
auto f = std::stoi(entry.attribute("failed").value()) + item.second.failed;
auto s = std::stoi(entry.attribute("skipped").value()) + item.second.skipped;
auto c = std::stoi(entry.attribute("crashed").value()) + item.second.crashed;
PassRate obj(p, f, s, c);
entry.attribute("passed").set_value(obj.passed);
entry.attribute("failed").set_value(obj.failed);
entry.attribute("skipped").set_value(obj.skipped);
entry.attribute("crashed").set_value(obj.crashed);
entry.attribute("passrate").set_value(obj.getPassrate());
}
}
}
bool result = doc.save_file(CommonTestUtils::REPORT_FILENAME);
if (!result) {
std::cout << "Failed to write report to " << CommonTestUtils::REPORT_FILENAME << "!" << std::endl;
} else {
isReported = true;
}
}
void TestEnvironment::TearDown() {
saveReport();
}
LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) {
@@ -172,9 +250,18 @@ void LayerTestsCommon::Run() {
}
};
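// SIGSEGV handler: flush the statistics collected so far to report.xml so the crashing test is still accounted for.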
auto crashHandler = [](int errCode) {
TestEnvironment::saveReport();
std::cout << "Unexpected application crash!" << std::endl;
std::abort();
};
signal(SIGSEGV, crashHandler);
if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
reportStatus(PassRate::Statuses::SKIPPED);
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
} else {
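// Pre-emptively report CRASHED; updateOPsStats() reverts this if the test finishes with PASSED or FAILED.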
reportStatus(PassRate::Statuses::CRASHED);
}
try {
@@ -225,8 +312,19 @@ void LayerTestsCommon::Serialize() {
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs,
float threshold) {
for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
const auto &expected = expectedOutputs[outputIndex];
const auto &actual = actualOutputs[outputIndex];
Compare(expected, actual, threshold);
}
}
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected,
const InferenceEngine::Blob::Ptr &actual,
float threshold) {
ASSERT_EQ(expected.size(), actual->byteSize());
const auto &expectedBuffer = expected.data();
@@ -284,6 +382,10 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
}
}
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
Compare(expected, actual, threshold);
}
void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual) {
auto get_raw_buffer = [](const InferenceEngine::Blob::Ptr &blob) {
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
@@ -438,11 +540,7 @@ std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) {
for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
const auto &expected = expectedOutputs[outputIndex];
const auto &actual = actualOutputs[outputIndex];
Compare(expected, actual);
}
Compare(expectedOutputs, actualOutputs, threshold);
}
void LayerTestsCommon::Validate() {

@@ -0,0 +1,207 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/ops.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "shared_test_classes/read_ir/compare_results.hpp"
namespace LayerTestsDefinitions {
namespace {
void compare(const std::shared_ptr<ngraph::Node> node,
const std::vector<std::vector<std::uint8_t>>& expected,
const std::vector<InferenceEngine::Blob::Ptr>& actual,
float threshold) {
LayerTestsUtils::LayerTestsCommon::Compare(expected, actual, threshold);
}
void compare(const std::shared_ptr<ngraph::op::v0::DetectionOutput> node,
const std::vector<std::uint8_t>& expected,
const std::vector<InferenceEngine::Blob::Ptr>& actual,
float threshold) {
ASSERT_EQ(expected.size(), actual.front()->byteSize());
size_t expSize = 0;
size_t actSize = 0;
const auto &expectedBuffer = expected.data();
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual.front());
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
const float *expBuf = reinterpret_cast<const float *>(expectedBuffer);
const float *actBuf = reinterpret_cast<const float *>(actualBuffer);
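// DetectionOutput emits 7-element detections terminated by an image id of -1;
// count only the valid detections on both sides before comparing.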
for (size_t i = 0; i < actual.front()->size(); i+=7) {
if (expBuf[i] == -1)
break;
expSize += 7;
}
for (size_t i = 0; i < actual.front()->size(); i+=7) {
if (actBuf[i] == -1)
break;
actSize += 7;
}
ASSERT_EQ(expSize, actSize);
LayerTestsUtils::LayerTestsCommon::Compare<float>(expBuf, actBuf, expSize, 1e-2f);
}
namespace Proposal {
template <class T>
void Compare(const T *expected, const T *actual, std::size_t size,
T threshold, const std::size_t output_index, size_t& num_selected_boxes) {
for (std::size_t i = 0; i < size; ++i) {
const auto &ref = expected[i];
const auto &res = actual[i];
// verify until first -1 appears in the 1st output.
if (output_index == 0 &&
CommonTestUtils::ie_abs(ref - static_cast<T>(-1)) <= threshold) {
// output0 shape = {x, 5}
// output1 shape = {x}
// setting the new_size for output1 verification
num_selected_boxes = i / 5;
return;
}
const auto absoluteDifference = CommonTestUtils::ie_abs(res - ref);
if (absoluteDifference <= threshold) {
continue;
}
const auto max = std::max(CommonTestUtils::ie_abs(res),
CommonTestUtils::ie_abs(ref));
float diff =
static_cast<float>(absoluteDifference) / static_cast<float>(max);
ASSERT_TRUE(max != 0 && (diff <= static_cast<float>(threshold)))
<< "Relative comparison of values expected: " << ref
<< " and actual: " << res << " at index " << i
<< " with threshold " << threshold << " failed";
}
}
} // namespace Proposal
void compare(const std::shared_ptr<ngraph::op::v4::Proposal> node,
const std::vector<std::vector<std::uint8_t>>& expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr>& actualOutputs,
float threshold) {
size_t num_selected_boxes = 0;
for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
const auto &expected = expectedOutputs[outputIndex];
const auto &actual = actualOutputs[outputIndex];
ASSERT_EQ(expected.size(), actual->byteSize());
const auto &expectedBuffer = expected.data();
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
IE_ASSERT(memory);
const auto lockedMemory = memory->rmap();
const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
const auto &precision = actual->getTensorDesc().getPrecision();
auto size = actual->size();
// if the first output held fewer proposals than the space provided,
// num_selected_boxes was set while verifying it; take this into account
// while verifying the 2nd output
if (outputIndex == 1 && num_selected_boxes) {
size = num_selected_boxes;
}
switch (precision) {
case InferenceEngine::Precision::BF16:
Proposal::Compare(
reinterpret_cast<const ngraph::bfloat16 *>(expectedBuffer),
reinterpret_cast<const ngraph::bfloat16 *>(actualBuffer), size,
ngraph::bfloat16(threshold), outputIndex, num_selected_boxes);
break;
case InferenceEngine::Precision::FP16:
Proposal::Compare(
reinterpret_cast<const ngraph::float16 *>(expectedBuffer),
reinterpret_cast<const ngraph::float16 *>(actualBuffer), size,
ngraph::float16(threshold), outputIndex, num_selected_boxes);
break;
case InferenceEngine::Precision::FP32:
Proposal::Compare<float>(
reinterpret_cast<const float *>(expectedBuffer),
reinterpret_cast<const float *>(actualBuffer), size,
threshold, outputIndex, num_selected_boxes);
break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
}
}
void compare(const std::shared_ptr<ngraph::op::v5::NonMaxSuppression> node,
const std::vector<std::vector<std::uint8_t>>& expectedOutputs,
const std::vector<InferenceEngine::Blob::Ptr>& actualOutputs,
float threshold) {
for (int outputIndex = static_cast<int>(expectedOutputs.size()) - 1; outputIndex >=0 ; outputIndex--) {
const auto& expected = expectedOutputs[outputIndex];
const auto& actual = actualOutputs[outputIndex];
const auto &expectedBuffer = expected.data();
auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
const auto actualBuffer = lockedMemory.as<const uint8_t *>();
if (outputIndex == 2) {
if (expected.size() != actual->byteSize())
throw std::runtime_error("Expected and actual sizes of the 3rd output differ");
}
const auto &precision = actual->getTensorDesc().getPrecision();
size_t size = expected.size() / actual->getTensorDesc().getPrecision().size();
switch (precision) {
case InferenceEngine::Precision::FP32: {
LayerTestsUtils::LayerTestsCommon::Compare(
reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer), size, threshold);
const auto fBuffer = lockedMemory.as<const float *>();
for (int i = size; i < actual->size(); i++) {
ASSERT_TRUE(fBuffer[i] == -1.f) << "Invalid default value: " << fBuffer[i] << " at index: " << i;
}
break;
}
case InferenceEngine::Precision::I32: {
LayerTestsUtils::LayerTestsCommon::Compare(
reinterpret_cast<const int32_t *>(expectedBuffer), reinterpret_cast<const int32_t *>(actualBuffer), size, 0);
const auto iBuffer = lockedMemory.as<const int *>();
for (int i = size; i < actual->size(); i++) {
ASSERT_TRUE(iBuffer[i] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i;
}
break;
}
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
}
}
template<typename T>
void compareResults(const std::shared_ptr<ngraph::Node> node,
const std::vector<std::vector<std::uint8_t>>& expected,
const std::vector<InferenceEngine::Blob::Ptr>& actual,
float threshold) {
return compare(ngraph::as_type_ptr<T>(node), expected, actual, threshold);
}
} // namespace
CompareMap getCompareMap() {
CompareMap compareMap{
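// X-macro trick: re-including each opset table with NGRAPH_OP defined registers one comparator per operation type.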
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, compareResults<NAMESPACE::NAME>},
#include "ngraph/opsets/opset1_tbl.hpp"
#include "ngraph/opsets/opset2_tbl.hpp"
#include "ngraph/opsets/opset3_tbl.hpp"
#include "ngraph/opsets/opset4_tbl.hpp"
#include "ngraph/opsets/opset5_tbl.hpp"
#include "ngraph/opsets/opset6_tbl.hpp"
#undef NGRAPH_OP
};
return compareMap;
}
} // namespace LayerTestsDefinitions

@@ -0,0 +1,660 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph/ops.hpp"
#include "shared_test_classes/single_layer/roi_align.hpp"
#include "shared_test_classes/single_layer/psroi_pooling.hpp"
#include "shared_test_classes/read_ir/generate_inputs.hpp"
namespace LayerTestsDefinitions {
namespace {
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::Node> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
namespace Activation {
InferenceEngine::Blob::Ptr generate(const InferenceEngine::InputInfo& info,
bool inPrcSigned,
int32_t data_start_from = -10,
uint32_t data_range = 20,
int32_t resolution = 32768) {
if (!inPrcSigned) {
data_range = 15;
data_start_from = 0;
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range,
data_start_from,
resolution);
}
} // namespace Activation
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Abs> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Acos> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Asin> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Atan> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Ceiling> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1000, 2000);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Clamp> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Cos> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Cosh> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::DetectionOutput> node,
const InferenceEngine::InputInfo& info,
size_t port) {
InferenceEngine::Blob::Ptr blob;
blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
int32_t resolution = 1;
uint32_t range = 1;
switch (port) {
case 1:
case 3:
resolution = 1000;
break;
case 2:
if (node->get_attrs().normalized) {
resolution = 1000;
} else {
range = 10;
}
break;
default:
resolution = 10;
break;
}
CommonTestUtils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, range, 0, resolution);
return blob;
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Elu> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Erf> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Exp> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Floor> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Gelu> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::HardSigmoid> node,
const InferenceEngine::InputInfo& info,
size_t port) {
switch (port) {
case 1: {
std::vector<float> alpha(node->get_input_shape(1).size(), 0.2f);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), alpha.data(), alpha.size());
}
case 2: {
std::vector<float> beta(node->get_input_shape(2).size(), 0.5f);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), beta.data(), beta.size());
}
default: {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::FakeQuantize> node,
const InferenceEngine::InputInfo& info,
size_t port) {
auto constShapes = node->get_input_shape(0);
int seed = 1;
size_t constDataSize = ngraph::shape_size(constShapes);
std::vector<float> inputLowData, inputHighData, outputLowData, outputHighData;
inputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
if (node->get_levels() != 2) {
inputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
} else {
inputHighData = inputLowData;
outputLowData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
outputHighData = NGraphFunctions::Utils::generateVector<ngraph::element::Type_t::f32>(constDataSize, 10, 1, seed);
for (int i = 0; i < constDataSize; i++) {
if (outputLowData[i] > outputHighData[i]) {
outputLowData[i] = 1;
outputHighData[i] = 0;
} else {
outputLowData[i] = 0;
outputHighData[i] = 1;
}
}
}
for (int i = 0; i < constDataSize; i++) {
inputLowData[i] = std::min(inputLowData[i], inputHighData[i]);
inputHighData[i] = std::max(inputLowData[i], inputHighData[i]);
if (inputLowData[i] == inputHighData[i])
inputHighData[i] += 1;
}
for (int i = 0; i < constDataSize; i++) {
outputLowData[i] = std::min(outputLowData[i], outputHighData[i]);
outputHighData[i] = std::max(outputLowData[i], outputHighData[i]);
if (outputLowData[i] == outputHighData[i])
outputHighData[i] += 1;
}
switch (port) {
case 1:
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), inputLowData.data(), inputLowData.size());
case 2:
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), inputHighData.data(), inputHighData.size());
case 3:
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), outputLowData.data(), outputLowData.size());
case 4:
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), outputHighData.data(), outputHighData.size());
default: {
float resolution = 1.0f, min = +5.f, max = +25.f;
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), max - min, min, resolution, seed);
}
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Log> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), 1, 20);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Negative> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::PRelu> node,
const InferenceEngine::InputInfo& info,
size_t port) {
switch (port) {
case 1: {
std::vector<float> negativeSlope(node->get_input_shape(1).size(), -0.01f);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), negativeSlope.data(), negativeSlope.size());
}
default: {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::PSROIPooling> node,
const InferenceEngine::InputInfo& info,
size_t port) {
const auto& inputShape = node->get_input_shape(0);
if (port == 1) {
InferenceEngine::Blob::Ptr blob;
blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
PSROIPoolingLayerTest::fillROITensor(blob->buffer(),
blob->size() / 5,
inputShape[0],
inputShape[2],
inputShape[3],
node->get_group_size(),
node->get_spatial_scale(),
node->get_spatial_bins_x(),
node->get_spatial_bins_y(),
node->get_mode());
return blob;
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::ROIPooling> node,
const InferenceEngine::InputInfo& info,
size_t port) {
const auto& inputShape = node->get_input_shape(0);
if (port == 1) {
InferenceEngine::Blob::Ptr blob;
blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
CommonTestUtils::fill_data_roi(blob->buffer(),
blob->size(),
node->get_input_shape(0).front() - 1,
inputShape[2],
inputShape[3],
1.0f,
node->get_method() == "max");
return blob;
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Selu> node,
const InferenceEngine::InputInfo& info,
size_t port) {
switch (port) {
case 1: {
std::vector<float> alpha(node->get_input_shape(1).size(), 1.6732f);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), alpha.data(), alpha.size());
}
case 2: {
std::vector<float> lambda(node->get_input_shape(1).size(), 1.0507f);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), lambda.data(), lambda.size());
}
default:
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Sigmoid> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Sign> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Sin> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Sinh> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Sqrt> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), 1, 20);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Tan> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v0::Tanh> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::Divide> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return info.getPrecision().is_float() ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128):
FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 100, 101);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::FloorMod> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return info.getPrecision().is_float() ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128):
FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 4, 2);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::GatherTree> node,
const InferenceEngine::InputInfo& info,
size_t port) {
auto& shape = node->get_input_shape(0);
auto maxBeamIndx = shape.at(2) - 1;
switch (port) {
case 2:
case 3:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx, maxBeamIndx / 2);
default:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx);
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::LogicalAnd> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::LogicalNot> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::LogicalOr> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::LogicalXor> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::Power> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return info.getPrecision().is_float() ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128):
FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 4, 2);
}
namespace ReduceOps {
InferenceEngine::Blob::Ptr generate(const ngraph::AxisSet& axis_vec,
const InferenceEngine::InputInfo& info) {
IE_ASSERT(axis_vec.size() == 1);
auto axis = *axis_vec.begin();
auto td = info.getTensorDesc();
auto dims = td.getDims();
// Slice of tensor through axis is {1, 0, 0, ....}, the mean value is 1/slice_size
auto raw_values = std::vector<float>(dims[axis], 0);
raw_values[0] = 1;
auto blob = make_blob_with_precision(td);
blob->allocate();
CommonTestUtils::fill_data_with_broadcast(blob, axis, raw_values);
return blob;
}
} // namespace ReduceOps
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceLogicalAnd> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceLogicalOr> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceMax> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceMean> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceMin> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceProd> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v1::ReduceSum> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v3::Bucketize> node,
const InferenceEngine::InputInfo& info,
size_t port) {
InferenceEngine::Blob::Ptr blobPtr;
switch (port) {
case 0: {
auto data_shape = info.getTensorDesc().getDims();
auto data_size = std::accumulate(begin(data_shape), end(data_shape), 1, std::multiplies<uint64_t>());
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_size * 5, 0, 10, 7235346);
}
case 1: {
return FuncTestUtils::createAndFillBlobUniqueSequence(info.getTensorDesc(), 0, 10, 8234231);
}
default:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v3::ROIAlign> node,
const InferenceEngine::InputInfo& info,
size_t port) {
const auto& inputShape = node->get_input_shape(0);
switch (port) {
case 1: {
std::vector<float> blobData(node->get_shape()[0] * 4);
ROIAlignLayerTest::fillCoordTensor(blobData,
inputShape[2],
inputShape[3],
node->get_spatial_scale(),
node->get_sampling_ratio(),
node->get_pooled_h(),
node->get_pooled_w());
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), blobData.data(), blobData.size());
}
case 2: {
std::vector<int> roiIdxVector(node->get_shape()[0]);
ROIAlignLayerTest::fillIdxTensor(roiIdxVector, node->get_shape()[0]);
return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), roiIdxVector.data(), roiIdxVector.size());
}
default:
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::HSwish> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::Mish> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::Proposal> node,
const InferenceEngine::InputInfo& info,
size_t port) {
if (port == 0) {
return FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 1, 0, 1000, 8234231);
}
return FuncTestUtils::createAndFillBlobFloatNormalDistribution(info.getTensorDesc(), 0.0f, 0.2f, 7235346);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::ReduceL1> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::ReduceL2> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return ReduceOps::generate(node->get_reduction_axes(), info);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::SoftPlus> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v4::Swish> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::BatchNormInference> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), 3, 0, 1);
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::GRUSequence> node,
const InferenceEngine::InputInfo& info,
size_t port) {
if (port == 2) {
unsigned int m_max_seq_len = 10;
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::HSigmoid> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::Loop> node,
const InferenceEngine::InputInfo& info,
size_t port) {
auto tdesc = info.getTensorDesc();
auto blob = make_blob_with_precision(tdesc);
blob->allocate();
if (tdesc.getLayout() == InferenceEngine::SCALAR) {
auto scalar_1d = CommonTestUtils::make_reshape_view(blob, {1});
unsigned int max_iter_num = 10;
CommonTestUtils::fill_data_with_broadcast(scalar_1d, 0, {static_cast<float>(max_iter_num)});
} else {
int start_value = 7;
CommonTestUtils::fill_data_with_broadcast(blob, 0, {static_cast<float>(start_value)});
}
return blob;
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::LSTMSequence> node,
const InferenceEngine::InputInfo& info,
size_t port) {
if (port == 2) {
unsigned int m_max_seq_len = 10;
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::NonMaxSuppression> node,
const InferenceEngine::InputInfo& info,
size_t port) {
std::cout << "lklkllll" << std::endl;
if (port == 1) {
InferenceEngine::Blob::Ptr blob;
blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
CommonTestUtils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, 1, 0, 1000);
return blob;
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::RNNSequence> node,
const InferenceEngine::InputInfo& info,
size_t port) {
if (port == 2) {
unsigned int m_max_seq_len = 10;
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
}
return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::Round> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return Activation::generate(info, node->get_input_element_type(0).is_signed(), -10, 20, 4);
}
template<typename T>
InferenceEngine::Blob::Ptr generateInput(const std::shared_ptr<ngraph::Node> node,
const InferenceEngine::InputInfo& info,
size_t port) {
return generate(ngraph::as_type_ptr<T>(node), info, port);
}
} // namespace
InputsMap getInputMap() {
static InputsMap inputsMap{
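// Same X-macro registration as getCompareMap(): one type-specific input generator per operation in the opsets.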
#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, generateInput<NAMESPACE::NAME>},
#include "ngraph/opsets/opset1_tbl.hpp"
#include "ngraph/opsets/opset2_tbl.hpp"
#include "ngraph/opsets/opset3_tbl.hpp"
#include "ngraph/opsets/opset4_tbl.hpp"
#include "ngraph/opsets/opset5_tbl.hpp"
#include "ngraph/opsets/opset6_tbl.hpp"
#undef NGRAPH_OP
};
return inputsMap;
}
} // namespace LayerTestsDefinitions

@@ -0,0 +1,89 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/read_ir/read_ir.hpp"
#include "shared_test_classes/read_ir/compare_results.hpp"
#include "shared_test_classes/read_ir/generate_inputs.hpp"
namespace LayerTestsDefinitions {
std::string ReadIRTest::getTestCaseName(const testing::TestParamInfo<std::tuple<std::string, std::string>>& obj) {
std::string pathToModel, deviceName;
std::tie(pathToModel, deviceName) = obj.param;
std::ostringstream result;
result << "ModelPath=" << pathToModel << "_";
result << "TargetDevice=" << deviceName << "_";
return result.str();
}
void ReadIRTest::SetUp() {
std::tie(pathToModel, targetDevice) = this->GetParam();
cnnNetwork = getCore()->ReadNetwork(pathToModel);
function = cnnNetwork.getFunction();
}
void ReadIRTest::GenerateInputs() {
auto inputMap = getInputMap();
const auto& inputsInfo = executableNetwork.GetInputsInfo();
for (const auto& param : function->get_parameters()) {
const auto infoIt = inputsInfo.find(param->get_friendly_name());
GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
const auto& info = infoIt->second;
for (size_t i = 0; i < param->get_output_size(); i++) {
for (const auto& node : param->get_output_target_inputs(i)) {
const auto nodePtr = node.get_node()->shared_from_this();
auto it = inputMap.find(nodePtr->get_type_info());
for (size_t port = 0; port < nodePtr->get_input_size(); ++port) {
if (nodePtr->get_input_node_ptr(port)->shared_from_this() == param->shared_from_this()) {
inputs.push_back(it->second(nodePtr, *info, port));
}
}
}
}
}
}
void ReadIRTest::Compare(const std::vector<std::vector<std::uint8_t>> &expected,
const std::vector<InferenceEngine::Blob::Ptr> &actual) {
auto compareMap = getCompareMap();
for (const auto& result : function->get_results()) {
for (size_t i = 0; i < result->get_input_size(); ++i) {
const auto inputNode = result->get_input_node_shared_ptr(i);
auto it = compareMap.find(inputNode->get_type_info());
it->second(inputNode, expected, actual, threshold);
}
}
}
std::vector<InferenceEngine::Blob::Ptr> ReadIRTest::GetOutputs() {
std::vector<InferenceEngine::Blob::Ptr> outputs;
// CNNNetworkNGraphImpl::getOVNameForTensor works incorrectly: _tensorNames is empty
// for (const auto& result : function->get_results()) {
// outputs.push_back(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(result->get_friendly_name())));
// }
for (const auto& result : function->get_results()) {
for (size_t inPort = 0; inPort < result->get_input_size(); ++inPort) {
const auto& inputNode = result->get_input_node_shared_ptr(inPort);
for (size_t outPort = 0; outPort < inputNode->get_output_size(); ++outPort) {
for (const auto& out : inputNode->get_output_target_inputs(outPort)) {
if (out.get_node()->shared_from_this() == result) {
std::string name = inputNode->get_friendly_name();
if (inputNode->get_output_size() > 1) {
name += "." + std::to_string(outPort);
}
outputs.push_back(inferRequest.GetBlob(name));
break;
}
}
}
}
}
return outputs;
}
} // namespace LayerTestsDefinitions

@@ -42,7 +42,7 @@ static int randInt(int low, int high) {
return dis(gen);
}
static void fillROITensor(float* buffer, int numROIs, int batchSize,
void PSROIPoolingLayerTest::fillROITensor(float* buffer, int numROIs, int batchSize,
int height, int width, int groupSize,
float spatialScale, int spatialBinsX, int spatialBinsY, const std::string& mode) {
int minRoiWidth = groupSize;
@@ -61,8 +61,8 @@ static void fillROITensor(float* buffer, int numROIs, int batchSize,
}
int batchId = 0;
for (int i = 0; i < numROIs; i++) {
int sizeX = std::min(width, randInt(minRoiWidth, maxRoiWidth));
int sizeY = std::min(height, randInt(minRoiHeight, maxRoiHeight));
int sizeX = std::min(width, randInt(std::min(minRoiWidth, maxRoiWidth), std::max(minRoiWidth, maxRoiWidth)));
int sizeY = std::min(height, randInt(std::min(minRoiHeight, maxRoiHeight), std::max(minRoiHeight, maxRoiHeight)));
int startX = randInt(0, std::max(1, width - sizeX - 1));
int startY = randInt(0, std::max(1, height - sizeY - 1));

@@ -47,8 +47,8 @@ static int randInt(int low, int high) {
return dis(gen);
}
static void fillCoordTensor(std::vector<float> & coords, int height, int width,
float spatialScale, int pooledRatio, int pooledH, int pooledW) {
void ROIAlignLayerTest::fillCoordTensor(std::vector<float>& coords, int height, int width,
float spatialScale, int pooledRatio, int pooledH, int pooledW) {
int minRoiWidth = pooledW;
int maxRoiWidth = width / pooledRatio;
int minRoiHeight = pooledH;
@@ -66,7 +66,7 @@ static void fillCoordTensor(std::vector<float> & coords, int height, int width,
coords[i * 4 + 3] = (startY + sizeY - 1) / spatialScale;
}
}
static void fillIdxTensor(std::vector<int> & idx, int batchSize) {
void ROIAlignLayerTest::fillIdxTensor(std::vector<int>& idx, int batchSize) {
int batchId = 0;
for (int i = 0; i < idx.size(); i++) {
idx[i] = batchId;

@@ -323,8 +323,8 @@ void inline fill_data_normal_random_float(InferenceEngine::Blob::Ptr &blob,
}
}
template<InferenceEngine::Precision::ePrecision PRC>
void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const float values[], const size_t size) {
template<InferenceEngine::Precision::ePrecision PRC, typename T>
void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const T values[], const size_t size) {
using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
auto *rawBlobDataPtr = blob->buffer().as<dataType *>();

@@ -3,6 +3,7 @@
//
#pragma once
#include <regex>
#include <fstream>
#include <string>
#include <vector>
@@ -116,6 +117,7 @@ inline bool directoryExists(const std::string &path) {
return false;
}
inline void directoryFileListRecursive(const std::string& name, std::vector<std::string>& file_list) {
struct CloseDir {
void operator()(DIR* d) const noexcept {
@@ -171,6 +173,31 @@ inline int createDirectoryRecursive(const std::string& dirPath) {
return 0;
}
inline std::vector<std::string> getFileListByPatternRecursive(const std::vector<std::string>& folderPaths,
const std::regex& pattern) {
auto getFileListByPattern = [&pattern](const std::string& folderPath) {
std::vector<std::string> allFilePaths;
CommonTestUtils::directoryFileListRecursive(folderPath, allFilePaths);
std::set<std::string> result;
for (auto& filePath : allFilePaths) {
if (CommonTestUtils::fileExists(filePath) && std::regex_match(filePath, pattern)) {
result.insert(filePath);
}
}
return result;
};
std::vector<std::string> result;
for (auto &&folderPath : folderPaths) {
if (!CommonTestUtils::directoryExists(folderPath)) {
continue;
}
auto fileListByPattern = getFileListByPattern(folderPath);
result.insert(result.end(), fileListByPattern.begin(), fileListByPattern.end());
}
return result;
}
inline std::string replaceExt(std::string file, const std::string& newExt) {
std::string::size_type i = file.rfind('.', file.length());

@@ -1,4 +1,4 @@
// Copyright (C) 2019 Intel Corporation
// Copyright (C) 2019-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
@@ -16,6 +16,8 @@ const char DEVICE_MULTI[] = "MULTI";
const char DEVICE_TEMPLATE[] = "TEMPLATE";
const char DEVICE_HETERO[] = "HETERO";
const char REPORT_FILENAME[] = "report.xml";
#ifdef _WIN32
#ifdef __MINGW32__
const char pre[] = "lib";

@@ -436,13 +436,14 @@ inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::
return blob;
}
template<typename T>
inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td,
const float values[],
const T values[],
const int size) {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
blob->allocate();
switch (td.getPrecision()) {
#define CASE(X) case X: CommonTestUtils::fill_data_float_array<X>(blob, values, size); break;
#define CASE(X) case X: CommonTestUtils::fill_data_float_array<X, T>(blob, values, size); break;
CASE(InferenceEngine::Precision::FP32)
CASE(InferenceEngine::Precision::FP16)
CASE(InferenceEngine::Precision::U8)

@@ -46,7 +46,7 @@
{% if op in results[d] -%}
<td>
{{ results[d][op].passrate }}% (<span style="color: green">p:{{ results[d][op].passed }}</span>,
<span style="color: red">f:{{ results[d][op].failed }}</span>,s:{{ results[d][op].skipped }})
<span style="color: red">f:{{ results[d][op].failed }}</span>,s:{{ results[d][op].skipped }},c:{{ results[d][op].crashed }})
</td>
{% else -%}
<td class="table-warning">No tests</td>