From 8e080d4504945211d11fbc345bed9e4673172733 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Mon, 15 Mar 2021 12:20:53 +0300 Subject: [PATCH] [IE TESTS][Conformance] Add conformance tests runner (#3694) * [IE TESTS][Conformance] Add conformance test target * Handle crashes. Added crashed tests to the report * Fix report and flags * Apply comments * Apply comment * Remove flag, fix * copyrights * Generate in * Remove extra code * fix mac * CI fixes * fix win * Fixes * Input fixes * fix flag * Add generate in * Fix compare * Fixes CI --- .../tests/functional/plugin/CMakeLists.txt | 1 - .../plugin/conformance/CMakeLists.txt | 3 +- .../conformance/test_runner/CMakeLists.txt | 33 + .../plugin/conformance/test_runner/README.md | 46 ++ .../test_runner/include/conformance.hpp | 10 + .../test_runner/include/gflag_config.hpp | 39 ++ .../test_runner/src/core_config.cpp | 8 + .../conformance/test_runner/src/main.cpp | 57 ++ .../conformance/test_runner/src/precomp.hpp | 36 + .../test_runner/src/read_ir/read_ir.cpp | 23 + .../test_runner/src/skip_tests_config.cpp | 12 + .../plugin/shared/include/read_ir/read_ir.hpp | 11 + .../functional/plugin/shared/src/main.cpp | 8 +- .../base/layer_test_utils.hpp | 35 +- .../read_ir/compare_results.hpp | 19 + .../read_ir/generate_inputs.hpp | 17 + .../shared_test_classes/read_ir/read_ir.hpp | 27 + .../single_layer/psroi_pooling.hpp | 3 + .../single_layer/roi_align.hpp | 3 + .../src/base/layer_test_utils.cpp | 142 +++- .../src/read_ir/compareResults.cpp | 207 ++++++ .../src/read_ir/generate_inputs.cpp | 660 ++++++++++++++++++ .../src/read_ir/read_ir.cpp | 89 +++ .../src/single_layer/psroi_pooling.cpp | 6 +- .../src/single_layer/roi_align.cpp | 6 +- .../common_test_utils/data_utils.hpp | 4 +- .../common_test_utils/file_utils.hpp | 27 + .../common_test_utils/test_constants.hpp | 4 +- .../functional_test_utils/blob_utils.hpp | 5 +- .../template/report_template.html | 2 +- 30 files changed, 1494 insertions(+), 49 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/CMakeLists.txt create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/README.md create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/include/conformance.hpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/include/gflag_config.hpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/src/core_config.cpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/src/main.cpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/src/precomp.hpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/src/read_ir/read_ir.cpp create mode 100644 inference-engine/tests/functional/plugin/conformance/test_runner/src/skip_tests_config.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/include/read_ir/read_ir.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/compare_results.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/generate_inputs.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp create mode 100644 inference-engine/tests/functional/shared_test_classes/src/read_ir/compareResults.cpp create mode 100644 
inference-engine/tests/functional/shared_test_classes/src/read_ir/generate_inputs.cpp create mode 100644 inference-engine/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp diff --git a/inference-engine/tests/functional/plugin/CMakeLists.txt b/inference-engine/tests/functional/plugin/CMakeLists.txt index db3d4da942b..b03e26e02e1 100644 --- a/inference-engine/tests/functional/plugin/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/CMakeLists.txt @@ -20,6 +20,5 @@ if (ENABLE_MYRIAD) add_subdirectory(myriad) endif() - add_subdirectory(conformance) diff --git a/inference-engine/tests/functional/plugin/conformance/CMakeLists.txt b/inference-engine/tests/functional/plugin/conformance/CMakeLists.txt index 7ce0af40247..6b48d13fe50 100644 --- a/inference-engine/tests/functional/plugin/conformance/CMakeLists.txt +++ b/inference-engine/tests/functional/plugin/conformance/CMakeLists.txt @@ -1,7 +1,8 @@ -# Copyright (C) 2020 Intel Corporation +# Copyright (C) 2021 Intel Corporation # # SPDX-License-Identifier: Apache-2.0 # +add_subdirectory(test_runner) add_subdirectory(subgraphs_dumper) add_subdirectory(subgraphs_dumper/tests) diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/CMakeLists.txt b/inference-engine/tests/functional/plugin/conformance/test_runner/CMakeLists.txt new file mode 100644 index 00000000000..13cea2b9a2b --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (C) 2021 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME conformanceTests) + +list(APPEND EXPORT_DEPENDENCIES + gflags + funcSharedTests + ) + +addIeTargetTest( + NAME ${TARGET_NAME} + ROOT "${CMAKE_CURRENT_SOURCE_DIR}/include" + ADDITIONAL_SOURCE_DIRS + ${CMAKE_CURRENT_SOURCE_DIR}/src + ADD_CPPLINT + INCLUDES + PUBLIC + "${CMAKE_CURRENT_SOURCE_DIR}/include" + LINK_LIBRARIES + PUBLIC + ${EXPORT_DEPENDENCIES} + DEPENDENCIES + ${EXPORT_DEPENDENCIES} + LABELS + CONFORMANCE +) + +ie_faster_build(${TARGET_NAME} + PCH PRIVATE "src/precomp.hpp" + ) diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/README.md b/inference-engine/tests/functional/plugin/conformance/test_runner/README.md new file mode 100644 index 00000000000..6b7c76c4567 --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/README.md @@ -0,0 +1,46 @@ +# Conformance test runner + +## Description +The conformance suite is a set of tests whose parameters are independent of plugin specifics and limitations. It contains: +* `ReadIR`. Reads IRs from the given folders recursively, infers them, and compares the results with references. + +## How to build +Run the following commands in the build directory: +1. Generate the CMake project: + ``` + cmake -DENABLE_FUNCTIONAL_TESTS=ON .. + ``` +2. Build the target: + ``` + make conformanceTests + ``` + +## How to run +The target accepts the following command-line arguments: +* `-h` prints the target command-line options with descriptions. +* `--device` specifies the target device. +* `--input_folders` specifies the folders with IRs to run. The separator is `,`. +* `--disable_test_config` ignores the test skipping rules and runs all tests (except those with the `DISABLED_` prefix). +* `--extend_report` does not overwrite the device results in the report (the results of this run are added to the existing ones). +* All `gtest` command-line parameters + +The result of the execution is a `report.xml` file. 
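+A sketch of the report layout (the element and attribute names follow `TestEnvironment::saveReport` added later in this patch; the device, operation names, and counts below are illustrative only):
+```
+<report timestamp="...">
+    <ops_list>
+        <Add-1/>
+        <Subtract-1/>
+        ...
+    </ops_list>
+    <results>
+        <CPU>
+            <Add-1 passed="10" failed="1" skipped="0" crashed="2" passrate="76.92"/>
+            ...
+        </CPU>
+    </results>
+</report>
+```
+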
The report shows test statistics such as the pass rate and the numbers of passed, failed, crashed, and skipped tests per operation for +each device. + +> **NOTE**: +> +> Using the GTest parallel tool to run `conformanceTests` helps report crashed tests and collect correct statistics +> after unexpected crashes. +> +> An example of usage: +> ``` +> python3 gtest_parallel.py /opt/repo/openvino/bin/intel64/Debug/conformanceTests -d . --gtest_filter=*1613473581844763495*:*roi_align*:*PSROIPooling*:*Add*:*BinaryConv* -- --input_folders=/opt/repo/roi_align,/opt/repo/omz/out --device=CPU +> ``` +> All arguments after the `--` symbol are forwarded to the `conformanceTests` target. + +## How to build the operation coverage report +Run [the script](./../../../../ie_test_utils/functional_test_utils/layer_tests_summary/summarize.py) to generate an `html` report. +An example of using the script: +``` +python3 summarize.py --xml /opt/repo/infrastructure-master/thirdparty/gtest-parallel/report.xml --out /opt/repo/infrastructure-master/thirdparty/gtest-parallel/ +``` \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/include/conformance.hpp b/inference-engine/tests/functional/plugin/conformance/test_runner/include/conformance.hpp new file mode 100644 index 00000000000..cecbe363546 --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/include/conformance.hpp @@ -0,0 +1,10 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +namespace ConformanceTests { + +extern const char* targetDevice; +extern std::vector<std::string> IRFolderPaths; + +} // namespace ConformanceTests diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/include/gflag_config.hpp b/inference-engine/tests/functional/plugin/conformance/test_runner/include/gflag_config.hpp new file mode 100644 index 00000000000..aea07c937fd --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/include/gflag_config.hpp @@ -0,0 +1,39 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gflags/gflags.h> +#include <iostream> + +static const char help_message[] = "Print a usage message."; +static const char disable_test_config_message[] = "Optional. Ignore test skipping rules and run all the tests (except those which are skipped with DISABLED " "prefix)"; +static const char extend_report_config_message[] = "Optional. Extend operation coverage report without overwriting the device results."; +static const char target_device_message[] = "Required. Specify the target device for Conformance Test Suite " "(the list of available devices is shown below). Default value is CPU. " "Use \"-d HETERO:\" format to specify HETERO plugin. " "The application looks for a suitable plugin for the specified device."; +static const char input_folders_message[] = "Required. Paths to the input folders with IRs. 
The delimiter is the `,` symbol."; + +DEFINE_bool(h, false, help_message); +DEFINE_string(device, "CPU", target_device_message); +DEFINE_string(input_folders, ".", input_folders_message); +DEFINE_bool(disable_test_config, true, disable_test_config_message); +DEFINE_bool(extend_report, true, extend_report_config_message); + +/** +* @brief This function shows a help message +*/ +static void showUsage() { + std::cout << std::endl; + std::cout << "Conformance tests [OPTION]" << std::endl; + std::cout << "Options:" << std::endl; + std::cout << std::endl; + std::cout << " -h " << help_message << std::endl; + std::cout << " --disable_test_config " << disable_test_config_message << std::endl; + std::cout << " --extend_report " << extend_report_config_message << std::endl; + std::cout << " --device " << target_device_message << std::endl; + std::cout << " --input_folders \"\" " << input_folders_message << std::endl; +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/core_config.cpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/core_config.cpp new file mode 100644 index 00000000000..0305c983e8d --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/core_config.cpp @@ -0,0 +1,8 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "functional_test_utils/core_config.hpp" + +void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) { +} diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/main.cpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/main.cpp new file mode 100644 index 00000000000..ff67203994c --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/main.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "shared_test_classes/base/layer_test_utils.hpp" + +#include "gflag_config.hpp" +#include "conformance.hpp" + +static std::vector<std::string> splitStringByDelimiter(std::string str, const std::string& delimiter = ",") { + size_t delimiterPos; + std::vector<std::string> irPaths; + while ((delimiterPos = str.find(delimiter)) != std::string::npos) { + irPaths.push_back(str.substr(0, delimiterPos)); + str = str.substr(delimiterPos + 1); + } + irPaths.push_back(str); + return irPaths; +} + +int main(int argc, char* argv[]) { + FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true; + LayerTestsUtils::extendReport = true; + // Workaround for GTest + gflags + std::vector<char*> argv_gflags_vec; + int argc_gflags = 0; + for (int i = 0; i < argc; ++i) { + std::string arg(argv[i]); + if (arg.find("gtest") == std::string::npos) { + argv_gflags_vec.emplace_back(argv[i]); + argc_gflags++; + } + } + char** argv_gflags = argv_gflags_vec.data(); + + // ---------------------------Parsing and validation of input args-------------------------------------- + gflags::ParseCommandLineNonHelpFlags(&argc_gflags, &argv_gflags, true); + if (FLAGS_h) { + showUsage(); + return 0; + } + if (!FLAGS_disable_test_config) { + FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false; + } + if (!FLAGS_extend_report) { + LayerTestsUtils::extendReport = false; + } + // ---------------------------Initialization of Gtest env ----------------------------------------------- + ConformanceTests::targetDevice = FLAGS_device.c_str(); + ConformanceTests::IRFolderPaths = splitStringByDelimiter(FLAGS_input_folders); + + 
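+    // Note: gflags parsed only the filtered copy of argv (every argument containing "gtest"
+    // was removed above), while InitGoogleTest below consumes the original argv, so each
+    // library sees only its own flags. As an illustration (hypothetical invocation, not part
+    // of this patch): with `--input_folders=a,b`, splitStringByDelimiter(FLAGS_input_folders)
+    // returns {"a", "b"}.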
::testing::InitGoogleTest(&argc, argv); + ::testing::AddGlobalTestEnvironment(new LayerTestsUtils::TestEnvironment); + return RUN_ALL_TESTS(); +} diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/precomp.hpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/precomp.hpp new file mode 100644 index 00000000000..b7eb39ce5e6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/precomp.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include +#include +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/subgraph_builders.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/read_ir/read_ir.cpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/read_ir/read_ir.cpp new file mode 100644 index 00000000000..2f9d5ef8640 --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/read_ir/read_ir.cpp @@ -0,0 +1,23 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/file_utils.hpp" + +#include "read_ir/read_ir.hpp" + +namespace ConformanceTests { +using namespace LayerTestsDefinitions; + +const char* targetDevice = ""; +std::vector<std::string> IRFolderPaths = {}; + +namespace { +INSTANTIATE_TEST_CASE_P(conformance, + ReadIRTest, + ::testing::Combine( + ::testing::ValuesIn(CommonTestUtils::getFileListByPatternRecursive(IRFolderPaths, std::regex(R"(.*\.xml)"))), + ::testing::Values(targetDevice)), + ReadIRTest::getTestCaseName); +} // namespace +} // namespace ConformanceTests diff --git a/inference-engine/tests/functional/plugin/conformance/test_runner/src/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/conformance/test_runner/src/skip_tests_config.cpp new file mode 100644 index 00000000000..adefac9d0ac --- /dev/null +++ b/inference-engine/tests/functional/plugin/conformance/test_runner/src/skip_tests_config.cpp @@ -0,0 +1,12 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include <vector> +#include <string> + +#include "functional_test_utils/skip_tests_config.hpp" + +std::vector<std::string> disabledTestPatterns() { + return {}; +} diff --git a/inference-engine/tests/functional/plugin/shared/include/read_ir/read_ir.hpp b/inference-engine/tests/functional/plugin/shared/include/read_ir/read_ir.hpp new file mode 100644 index 00000000000..6744fdffc47 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/read_ir/read_ir.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2019-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/read_ir/read_ir.hpp" + +namespace LayerTestsDefinitions { +TEST_P(ReadIRTest, ReadIR) { + Run(); +} +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/main.cpp b/inference-engine/tests/functional/plugin/shared/src/main.cpp index 1b3728c518c..a697c8b95dd 100644 --- a/inference-engine/tests/functional/plugin/shared/src/main.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/main.cpp @@ -8,12 +8,14 @@ int main(int argc, char* argv[]) { 
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false; + LayerTestsUtils::extendReport = false; bool print_custom_help = false; for (int i = 0; i < argc; ++i) { if (std::string(argv[i]) == "--disable_tests_skipping") { FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true; - } - if (std::string(argv[i]) == "--help") { + } else if (std::string(argv[i]) == "--extend_report") { + LayerTestsUtils::extendReport = true; + } else if (std::string(argv[i]) == "--help") { print_custom_help = true; } } @@ -22,6 +24,8 @@ int main(int argc, char* argv[]) { std::cout << " --disable_tests_skipping" << std::endl; std::cout << " Ignore tests skipping rules and run all the test" << std::endl; std::cout << " (except those which are skipped with DISABLED prefix)" << std::endl; + std::cout << " --extend_report" << std::endl; + std::cout << " Extend operation coverage report without overwriting the device results" << std::endl; std::cout << std::endl; } ::testing::InitGoogleTest(&argc, argv); diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp index 9eddba04072..2f5af0ba98d 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp @@ -31,6 +31,8 @@ namespace LayerTestsUtils { +extern bool extendReport; + // filename length limitation due to Windows constraints (max 256 characters) constexpr std::size_t maxFileNameLength = 140; @@ -53,25 +55,28 @@ struct PassRate { enum Statuses { PASSED, FAILED, - SKIPPED + SKIPPED, + CRASHED }; unsigned long passed = 0; unsigned long failed = 0; unsigned long skipped = 0; + unsigned long crashed = 0; PassRate() = default; - PassRate(unsigned long p, unsigned long f, unsigned long s) { + PassRate(unsigned long p, unsigned long f, unsigned long s, unsigned long c) { passed = p; failed = f; skipped = s; + crashed = c; } float getPassrate() const { - if (passed + failed == 0) { + if (passed + failed + crashed == 0) { return 0.f; } else { - return passed * 100.f / (passed + failed + skipped); + return passed * 100.f / (passed + failed + skipped + crashed); } } }; @@ -96,6 +101,8 @@ protected: std::map getOPsStats() { return opsStats; } + std::map getOpStatisticFromReport(); + std::string getDeviceName() const { return deviceName; } void setDeviceName(std::string device) { deviceName = device; } @@ -113,9 +120,7 @@ public: class TestEnvironment : public ::testing::Environment { public: void TearDown() override; - -private: - std::string reportFileName = "report.xml"; + static void saveReport(); }; using TargetDevice = std::string; @@ -140,6 +145,14 @@ public: virtual void Serialize(); + static void Compare(const std::vector> &expected, + const std::vector &actual, + float threshold); + + static void Compare(const std::vector &expected, + const InferenceEngine::Blob::Ptr &actual, + float threshold); + virtual void Compare(const std::vector> &expectedOutputs, const std::vector &actualOutputs); @@ -155,9 +168,6 @@ public: std::string getRuntimePrecision(const std::string& layerName); -protected: - LayerTestsCommon(); - template static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) { for (std::size_t i = 0; i < size; ++i) { @@ -177,6 +187,9 @@ protected: } } +protected: + 
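+    // Note: the static Compare overloads above are public (rather than protected) so that the
+    // per-operation comparators used by the conformance ReadIR tests (see
+    // read_ir/compare_results.hpp) can call them from free functions.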
LayerTestsCommon(); + RefMode GetRefMode() { return refMode; } @@ -211,7 +224,7 @@ protected: virtual std::vector> CalculateRefs(); - std::vector GetOutputs(); + virtual std::vector GetOutputs(); InferenceEngine::InferRequest inferRequest; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/compare_results.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/compare_results.hpp new file mode 100644 index 00000000000..c48b558e5a6 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/compare_results.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "ngraph/node.hpp" + +namespace LayerTestsDefinitions { + +using CompareMap = std::map node, + const std::vector>& expected, + const std::vector& actual, + float threshold)>>; + +CompareMap getCompareMap(); +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/generate_inputs.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/generate_inputs.hpp new file mode 100644 index 00000000000..75982278981 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/generate_inputs.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "ngraph/node.hpp" + +namespace LayerTestsDefinitions { +using InputsMap = std::map node, + const InferenceEngine::InputInfo& info, + size_t port)>>; + +InputsMap getInputMap(); +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp new file mode 100644 index 00000000000..2daae83ce0f --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp @@ -0,0 +1,27 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/single_layer/psroi_pooling.hpp" +#include "shared_test_classes/single_layer/roi_pooling.hpp" +#include "shared_test_classes/single_layer/roi_align.hpp" + +namespace LayerTestsDefinitions { +class ReadIRTest : public testing::WithParamInterface>, + public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo> &obj); + +protected: + void SetUp() override; + void GenerateInputs() override; + void Compare(const std::vector> &expected, + const std::vector &actual) override; + std::vector GetOutputs() override; + +private: + std::string pathToModel; +}; +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp index 2f0654e28b0..6eda4295dc9 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp +++ 
b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp @@ -33,6 +33,9 @@ class PSROIPoolingLayerTest : public testing::WithParamInterface, public: static std::string getTestCaseName(testing::TestParamInfo obj); void GenerateInputs() override; + static void fillROITensor(float* buffer, int numROIs, int batchSize, + int height, int width, int groupSize, + float spatialScale, int spatialBinsX, int spatialBinsY, const std::string& mode); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp index 3af70bcaad8..763cd2b9045 100644 --- a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp @@ -23,6 +23,9 @@ class ROIAlignLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(testing::TestParamInfo obj); + static void fillCoordTensor(std::vector& coords, int height, int width, + float spatialScale, int pooledRatio, int pooledH, int pooledW); + static void fillIdxTensor(std::vector& idx, int batchSize); protected: void SetUp() override; diff --git a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp index af18bdfc64f..c9b5c56eb07 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // #include +#include #include #include @@ -16,6 +17,9 @@ namespace LayerTestsUtils { +bool isReported = false; +bool extendReport = true; + Summary *Summary::p_instance = nullptr; SummaryDestroyer Summary::destroyer; @@ -41,31 +45,67 @@ void Summary::updateOPsStats(ngraph::NodeTypeInfo op, PassRate::Statuses status) auto &passrate = it->second; switch (status) { case PassRate::PASSED: - passrate.passed += 1; + passrate.passed++; + passrate.crashed--; break; case PassRate::FAILED: - passrate.failed += 1; + passrate.failed++; + passrate.crashed--; break; case PassRate::SKIPPED: - passrate.skipped += 1; + passrate.skipped++; + break; + case PassRate::CRASHED: + passrate.crashed++; break; } } else { switch (status) { case PassRate::PASSED: - opsStats[op] = PassRate(1, 0, 0); + opsStats[op] = PassRate(1, 0, 0, 0); break; case PassRate::FAILED: - opsStats[op] = PassRate(0, 1, 0); + opsStats[op] = PassRate(0, 1, 0, 0); break; case PassRate::SKIPPED: - opsStats[op] = PassRate(0, 0, 1); + opsStats[op] = PassRate(0, 0, 1, 0); + break; + case PassRate::CRASHED: + opsStats[op] = PassRate(0, 0, 0, 1); break; } } } -void TestEnvironment::TearDown() { +std::map Summary::getOpStatisticFromReport() { + pugi::xml_document doc; + + std::ifstream file; + file.open(CommonTestUtils::REPORT_FILENAME); + + pugi::xml_node root; + doc.load_file(CommonTestUtils::REPORT_FILENAME); + root = doc.child("report"); + + pugi::xml_node resultsNode = root.child("results"); + pugi::xml_node currentDeviceNode = resultsNode.child(deviceName.c_str()); + std::map oldOpsStat; + for (auto &child : currentDeviceNode.children()) { + 
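+        // Each child element is one per-operation entry, e.g. <Add-1 passed="1" failed="0" .../>;
+        // its counters are restored here so that a run with --extend_report can merge the
+        // previous report with the results of the current run.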
std::string entry = child.name(); + auto p = std::stoi(child.attribute("passed").value()); + auto f = std::stoi(child.attribute("failed").value()); + auto s = std::stoi(child.attribute("skipped").value()); + auto c = std::stoi(child.attribute("crashed").value()); + PassRate obj(p, f, s, c); + oldOpsStat.insert({entry, obj}); + } + return oldOpsStat; +} + +void TestEnvironment::saveReport() { + if (isReported) { + return; + } std::vector opsets; opsets.push_back(ngraph::get_opset1()); opsets.push_back(ngraph::get_opset2()); @@ -80,13 +120,13 @@ void TestEnvironment::TearDown() { opsInfo.insert(type_info_set.begin(), type_info_set.end()); } - auto &s = Summary::getInstance(); - auto stats = s.getOPsStats(); + auto &summary = Summary::getInstance(); + auto stats = summary.getOPsStats(); pugi::xml_document doc; std::ifstream file; - file.open(reportFileName); + file.open(CommonTestUtils::REPORT_FILENAME); time_t rawtime; struct tm *timeinfo; @@ -100,14 +140,14 @@ void TestEnvironment::TearDown() { pugi::xml_node root; if (file) { - doc.load_file(reportFileName.c_str()); + doc.load_file(CommonTestUtils::REPORT_FILENAME); root = doc.child("report"); //Ugly but shorter than to write predicate for find_atrribute() to update existing one root.remove_attribute("timestamp"); root.append_attribute("timestamp").set_value(timeNow); root.remove_child("ops_list"); - root.child("results").remove_child(s.deviceName.c_str()); + root.child("results").remove_child(summary.deviceName.c_str()); } else { root = doc.append_child("report"); root.append_attribute("timestamp").set_value(timeNow); @@ -122,19 +162,57 @@ void TestEnvironment::TearDown() { } pugi::xml_node resultsNode = root.child("results"); - pugi::xml_node currentDeviceNode = resultsNode.append_child(s.deviceName.c_str()); + pugi::xml_node currentDeviceNode = resultsNode.append_child(summary.deviceName.c_str()); + std::unordered_set opList; for (const auto &it : stats) { std::string name = std::string(it.first.name) + "-" + std::to_string(it.first.version); + opList.insert(name); pugi::xml_node entry = currentDeviceNode.append_child(name.c_str()); entry.append_attribute("passed").set_value(it.second.passed); entry.append_attribute("failed").set_value(it.second.failed); entry.append_attribute("skipped").set_value(it.second.skipped); + entry.append_attribute("crashed").set_value(it.second.crashed); entry.append_attribute("passrate").set_value(it.second.getPassrate()); } - bool result = doc.save_file(reportFileName.c_str()); - if (!result) { - std::cout << "Failed to write report to " << reportFileName << "!" 
<< std::endl; + + if (extendReport && file) { + auto opStataFromReport = summary.getOpStatisticFromReport(); + for (auto& item : opStataFromReport) { + pugi::xml_node entry; + if (opList.find(item.first) == opList.end()) { + entry = currentDeviceNode.append_child(item.first.c_str()); + entry.append_attribute("passed").set_value(item.second.passed); + entry.append_attribute("failed").set_value(item.second.failed); + entry.append_attribute("skipped").set_value(item.second.skipped); + entry.append_attribute("crashed").set_value(item.second.crashed); + entry.append_attribute("passrate").set_value(item.second.getPassrate()); + } else { + entry = currentDeviceNode.child(item.first.c_str()); + auto p = std::stoi(entry.attribute("passed").value()) + item.second.passed; + auto f = std::stoi(entry.attribute("failed").value()) + item.second.failed; + auto s = std::stoi(entry.attribute("skipped").value()) + item.second.skipped; + auto c = std::stoi(entry.attribute("crashed").value()) + item.second.crashed; + PassRate obj(p, f, s, c); + + entry.attribute("passed").set_value(obj.passed); + entry.attribute("failed").set_value(obj.failed); + entry.attribute("skipped").set_value(obj.skipped); + entry.attribute("crashed").set_value(obj.crashed); + entry.attribute("passrate").set_value(obj.getPassrate()); + } + } } + + bool result = doc.save_file(CommonTestUtils::REPORT_FILENAME); + if (!result) { + std::cout << "Failed to write report to " << CommonTestUtils::REPORT_FILENAME << "!" << std::endl; + } else { + isReported = true; + } +} + +void TestEnvironment::TearDown() { + saveReport(); } LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) { @@ -172,9 +250,18 @@ void LayerTestsCommon::Run() { } }; + auto crashHandler = [](int errCode) { + TestEnvironment::saveReport(); + std::cout << "Unexpected application crash!" 
<< std::endl; + std::abort(); + }; + signal(SIGSEGV, crashHandler); + if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) { reportStatus(PassRate::Statuses::SKIPPED); GTEST_SKIP() << "Disabled test due to configuration" << std::endl; + } else { + reportStatus(PassRate::Statuses::CRASHED); } try { @@ -225,8 +312,19 @@ void LayerTestsCommon::Serialize() { InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const { return FuncTestUtils::createAndFillBlob(info.getTensorDesc()); } +void LayerTestsCommon::Compare(const std::vector> &expectedOutputs, + const std::vector &actualOutputs, + float threshold) { + for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) { + const auto &expected = expectedOutputs[outputIndex]; + const auto &actual = actualOutputs[outputIndex]; + Compare(expected, actual, threshold); + } +} -void LayerTestsCommon::Compare(const std::vector &expected, const InferenceEngine::Blob::Ptr &actual) { +void LayerTestsCommon::Compare(const std::vector &expected, + const InferenceEngine::Blob::Ptr &actual, + float threshold) { ASSERT_EQ(expected.size(), actual->byteSize()); const auto &expectedBuffer = expected.data(); @@ -284,6 +382,10 @@ void LayerTestsCommon::Compare(const std::vector &expected, const } } +void LayerTestsCommon::Compare(const std::vector &expected, const InferenceEngine::Blob::Ptr &actual) { + Compare(expected, actual, threshold); +} + void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual) { auto get_raw_buffer = [](const InferenceEngine::Blob::Ptr &blob) { auto memory = InferenceEngine::as(blob); @@ -438,11 +540,7 @@ std::vector LayerTestsCommon::GetOutputs() { void LayerTestsCommon::Compare(const std::vector> &expectedOutputs, const std::vector &actualOutputs) { - for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) { - const auto &expected = expectedOutputs[outputIndex]; - const auto &actual = actualOutputs[outputIndex]; - Compare(expected, actual); - } + Compare(expectedOutputs, actualOutputs, threshold); } void LayerTestsCommon::Validate() { diff --git a/inference-engine/tests/functional/shared_test_classes/src/read_ir/compareResults.cpp b/inference-engine/tests/functional/shared_test_classes/src/read_ir/compareResults.cpp new file mode 100644 index 00000000000..cda55906565 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/read_ir/compareResults.cpp @@ -0,0 +1,207 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/ops.hpp" + +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "shared_test_classes/read_ir/compare_results.hpp" + +namespace LayerTestsDefinitions { + +namespace { +void compare(const std::shared_ptr node, + const std::vector>& expected, + const std::vector& actual, + float threshold) { + LayerTestsUtils::LayerTestsCommon::Compare(expected, actual, threshold); +} + +void compare(const std::shared_ptr node, + const std::vector& expected, + const std::vector& actual, + float threshold) { + ASSERT_EQ(expected.size(), actual.front()->byteSize()); + + size_t expSize = 0; + size_t actSize = 0; + + const auto &expectedBuffer = expected.data(); + auto memory = InferenceEngine::as(actual.front()); + IE_ASSERT(memory); + const auto lockedMemory = memory->wmap(); + const auto actualBuffer = lockedMemory.as(); + + const float *expBuf = reinterpret_cast(expectedBuffer); + const 
float *actBuf = reinterpret_cast(actualBuffer); + for (size_t i = 0; i < actual.front()->size(); i+=7) { + if (expBuf[i] == -1) + break; + expSize += 7; + } + for (size_t i = 0; i < actual.front()->size(); i+=7) { + if (actBuf[i] == -1) + break; + actSize += 7; + } + ASSERT_EQ(expSize, actSize); + LayerTestsUtils::LayerTestsCommon::Compare(expBuf, actBuf, expSize, 1e-2f); +} + +namespace Proposal { +template +void Compare(const T *expected, const T *actual, std::size_t size, + T threshold, const std::size_t output_index, size_t& num_selected_boxes) { + for (std::size_t i = 0; i < size; ++i) { + const auto &ref = expected[i]; + const auto &res = actual[i]; + + // verify until first -1 appears in the 1st output. + if (output_index == 0 && + CommonTestUtils::ie_abs(ref - static_cast(-1)) <= threshold) { + // output0 shape = {x, 5} + // output1 shape = {x} + // setting the new_size for output1 verification + num_selected_boxes = i / 5; + return; + } + + const auto absoluteDifference = CommonTestUtils::ie_abs(res - ref); + if (absoluteDifference <= threshold) { + continue; + } + + const auto max = std::max(CommonTestUtils::ie_abs(res), + CommonTestUtils::ie_abs(ref)); + float diff = + static_cast(absoluteDifference) / static_cast(max); + ASSERT_TRUE(max != 0 && (diff <= static_cast(threshold))) + << "Relative comparison of values expected: " << ref + << " and actual: " << res << " at index " << i + << " with threshold " << threshold << " failed"; + } +} +} // namespace Proposal + +void compare(const std::shared_ptr node, + const std::vector>& expectedOutputs, + const std::vector& actualOutputs, + float threshold) { + size_t num_selected_boxes = 0; + for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) { + const auto &expected = expectedOutputs[outputIndex]; + const auto &actual = actualOutputs[outputIndex]; + ASSERT_EQ(expected.size(), actual->byteSize()); + const auto &expectedBuffer = expected.data(); + + auto memory = InferenceEngine::as(actual); + IE_ASSERT(memory); + const auto lockedMemory = memory->rmap(); + const auto actualBuffer = lockedMemory.as(); + + const auto &precision = actual->getTensorDesc().getPrecision(); + auto size = actual->size(); + + // verifying the first output if there was less proposals than space + // provided, + // num_selected_boxes was set, take this into consideration while verifying the 2nd + // output + if (outputIndex == 1 && num_selected_boxes) { + size = num_selected_boxes; + } + + switch (precision) { + case InferenceEngine::Precision::BF16: + Proposal::Compare( + reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), size, + ngraph::bfloat16(threshold), outputIndex, num_selected_boxes); + break; + case InferenceEngine::Precision::FP16: + Proposal::Compare( + reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), size, + ngraph::float16(threshold), outputIndex, num_selected_boxes); + break; + case InferenceEngine::Precision::FP32: + Proposal::Compare( + reinterpret_cast(expectedBuffer), + reinterpret_cast(actualBuffer), size, + threshold, outputIndex, num_selected_boxes); + break; + default: + FAIL() << "Comparator for " << precision << " precision isn't supported"; + } + } +} + +void compare(const std::shared_ptr node, + const std::vector>& expectedOutputs, + const std::vector& actualOutputs, + float threshold) { + for (int outputIndex = static_cast(expectedOutputs.size()) - 1; outputIndex >=0 ; outputIndex--) { + const auto& expected = expectedOutputs[outputIndex]; + const auto& actual = 
actualOutputs[outputIndex]; + + const auto &expectedBuffer = expected.data(); + auto memory = InferenceEngine::as(actual); + IE_ASSERT(memory); + const auto lockedMemory = memory->wmap(); + const auto actualBuffer = lockedMemory.as(); + + if (outputIndex == 2) { + if (expected.size() != actual->byteSize()) + throw std::runtime_error("Expected and actual size 3rd output have different size"); + } + + const auto &precision = actual->getTensorDesc().getPrecision(); + size_t size = expected.size() / actual->getTensorDesc().getPrecision().size(); + switch (precision) { + case InferenceEngine::Precision::FP32: { + LayerTestsUtils::LayerTestsCommon::Compare( + reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), size, threshold); + const auto fBuffer = lockedMemory.as(); + for (int i = size; i < actual->size(); i++) { + ASSERT_TRUE(fBuffer[i] == -1.f) << "Invalid default value: " << fBuffer[i] << " at index: " << i; + } + break; + } + case InferenceEngine::Precision::I32: { + LayerTestsUtils::LayerTestsCommon::Compare( + reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), size, 0); + const auto iBuffer = lockedMemory.as(); + for (int i = size; i < actual->size(); i++) { + ASSERT_TRUE(iBuffer[i] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; + } + break; + } + default: + FAIL() << "Comparator for " << precision << " precision isn't supported"; + } + } +} + +template +void compareResults(const std::shared_ptr node, + const std::vector>& expected, + const std::vector& actual, + float threshold) { + return compare(ngraph::as_type_ptr(node), expected, actual, threshold); +} +} // namespace + +CompareMap getCompareMap() { + CompareMap compareMap{ +#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, compareResults}, +#include "ngraph/opsets/opset1_tbl.hpp" +#include "ngraph/opsets/opset2_tbl.hpp" +#include "ngraph/opsets/opset3_tbl.hpp" +#include "ngraph/opsets/opset4_tbl.hpp" +#include "ngraph/opsets/opset5_tbl.hpp" +#include "ngraph/opsets/opset6_tbl.hpp" +#undef NGRAPH_OP + }; + return compareMap; +} + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/shared_test_classes/src/read_ir/generate_inputs.cpp b/inference-engine/tests/functional/shared_test_classes/src/read_ir/generate_inputs.cpp new file mode 100644 index 00000000000..32aa66096f3 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/read_ir/generate_inputs.cpp @@ -0,0 +1,660 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/ops.hpp" + +#include "shared_test_classes/single_layer/roi_align.hpp" +#include "shared_test_classes/single_layer/psroi_pooling.hpp" +#include "shared_test_classes/read_ir/generate_inputs.hpp" + +namespace LayerTestsDefinitions { + +namespace { +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc()); +} + +namespace Activation { +InferenceEngine::Blob::Ptr generate(const InferenceEngine::InputInfo& info, + bool inPrcSigned, + int32_t data_start_from = -10, + uint32_t data_range = 20, + int32_t resolution = 32768) { + if (!inPrcSigned) { + data_range = 15; + data_start_from = 0; + } + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range, + data_start_from, + resolution); +} +} // namespace Activation + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const 
InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1, 2); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), -1000, 2000); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + InferenceEngine::Blob::Ptr blob; + blob = make_blob_with_precision(info.getTensorDesc()); + blob->allocate(); + + int32_t resolution = 1; + uint32_t range = 1; + switch (port) { + case 1: + case 3: + resolution = 1000; + break; + case 2: + if (node->get_attrs().normalized) { + resolution = 1000; + } else { + range = 10; + } + break; + default: + resolution = 10; + break; + } + CommonTestUtils::fill_data_random_float(blob, range, 0, resolution); + return blob; +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + switch (port) { + case 1: { + std::vector alpha(node->get_input_shape(1).size(), 0.2f); + return 
FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), alpha.data(), alpha.size()); + } + case 2: { + std::vector beta(node->get_input_shape(2).size(), 0.5f); + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), beta.data(), beta.size()); + } + default: { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); + } + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + auto constShapes = node->get_input_shape(0); + int seed = 1; + size_t constDataSize = ngraph::shape_size(constShapes); + std::vector inputLowData, inputHighData, outputLowData, outputHighData; + inputLowData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + if (node->get_levels() != 2) { + inputHighData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + outputLowData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + outputHighData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + } else { + inputHighData = inputLowData; + outputLowData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + outputHighData = NGraphFunctions::Utils::generateVector(constDataSize, 10, 1, seed); + + for (int i = 0; i < constDataSize; i++) { + if (outputLowData[i] > outputHighData[i]) { + outputLowData[i] = 1; + outputHighData[i] = 0; + } else { + outputLowData[i] = 0; + outputHighData[i] = 1; + } + } + } + + for (int i = 0; i < constDataSize; i++) { + inputLowData[i] = std::min(inputLowData[i], inputHighData[i]); + inputHighData[i] = std::max(inputLowData[i], inputHighData[i]); + if (inputLowData[i] == inputHighData[i]) + inputHighData[i] += 1; + } + + for (int i = 0; i < constDataSize; i++) { + outputLowData[i] = std::min(outputLowData[i], outputHighData[i]); + outputHighData[i] = std::max(outputLowData[i], outputHighData[i]); + if (outputLowData[i] == outputHighData[i]) + outputHighData[i] += 1; + } + switch (port) { + case 1: + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), inputLowData.data(), inputLowData.size()); + case 2: + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), inputHighData.data(), inputHighData.size()); + case 3: + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), outputLowData.data(), outputLowData.size()); + case 4: + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), outputHighData.data(), outputHighData.size()); + default: { + float resolution = 1.0f, min = +5.f, max = +25.f; + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), max - min, min, resolution, seed); + } + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), 1, 20); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + switch (port) { + case 1: { + std::vector negativeSlope(node->get_input_shape(1).size(), -0.01f); + FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), negativeSlope.data(), negativeSlope.size()); + } + default: { + return 
Activation::generate(info, node->get_input_element_type(0).is_signed()); + } + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + const auto& inputShape = node->get_input_shape(0); + if (port == 1) { + InferenceEngine::Blob::Ptr blob; + blob = make_blob_with_precision(info.getTensorDesc()); + blob->allocate(); + PSROIPoolingLayerTest::fillROITensor(blob->buffer(), + blob->size() / 5, + inputShape[0], + inputShape[2], + inputShape[3], + node->get_group_size(), + node->get_spatial_scale(), + node->get_spatial_bins_x(), + node->get_spatial_bins_y(), + node->get_mode()); + return blob; + } + return FuncTestUtils::createAndFillBlob(info.getTensorDesc()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + const auto& inputShape = node->get_input_shape(0); + if (port == 1) { + InferenceEngine::Blob::Ptr blob; + blob = make_blob_with_precision(info.getTensorDesc()); + blob->allocate(); + + CommonTestUtils::fill_data_roi(blob->buffer(), + blob->size(), + node->get_input_shape(0).front() - 1, + inputShape[2], + inputShape[3], + 1.0f, + node->get_method() == "max"); + return blob; + } + return FuncTestUtils::createAndFillBlob(info.getTensorDesc()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + switch (port) { + case 1: { + std::vector alpha(node->get_input_shape(1).size(), 1.6732f); + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), alpha.data(), alpha.size()); + } + case 2: { + std::vector lambda(node->get_input_shape(1).size(), 1.0507f); + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), lambda.data(), lambda.size()); + } + default: + return Activation::generate(info, node->get_input_element_type(0).is_signed()); + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed(), 1, 20); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return Activation::generate(info, node->get_input_element_type(0).is_signed()); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + 
return info.getPrecision().is_float() ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128): + FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 100, 101); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return info.getPrecision().is_float() ? FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128): + FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 4, 2); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + auto& shape = node->get_input_shape(0); + auto maxBeamIndx = shape.at(2) - 1; + + switch (port) { + case 2: + case 3: + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx, maxBeamIndx / 2); + default: + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx); + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return info.getPrecision().is_float() ? 
FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 2, 128): + FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 4, 2); +} + +namespace ReduceOps { + InferenceEngine::Blob::Ptr generate(const ngraph::AxisSet& axis_vec, + const InferenceEngine::InputInfo& info) { + IE_ASSERT(axis_vec.size() == 1); + + auto axis = *axis_vec.begin(); + auto td = info.getTensorDesc(); + auto dims = td.getDims(); + + // Slice of tensor through axis is {1, 0, 0, ....}, the mean value is 1/slice_size + auto raw_values = std::vector(dims[axis], 0); + raw_values[0] = 1; + + auto blob = make_blob_with_precision(td); + blob->allocate(); + CommonTestUtils::fill_data_with_broadcast(blob, axis, raw_values); + return blob; + } +} // namespace ReduceOps + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + return ReduceOps::generate(node->get_reduction_axes(), info); +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + InferenceEngine::Blob::Ptr blobPtr; + switch (port) { + case 0: { + auto data_shape = info.getTensorDesc().getDims(); + auto data_size = std::accumulate(begin(data_shape), end(data_shape), 1, std::multiplies()); + return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_size * 5, 0, 10, 7235346); + } + case 1: { + return FuncTestUtils::createAndFillBlobUniqueSequence(info.getTensorDesc(), 0, 10, 8234231); + } + default: + return FuncTestUtils::createAndFillBlob(info.getTensorDesc()); + } +} + +InferenceEngine::Blob::Ptr generate(const std::shared_ptr node, + const InferenceEngine::InputInfo& info, + size_t port) { + const auto& inputShape = node->get_input_shape(0); + switch (port) { + case 1: { + std::vector blobData(node->get_shape()[0] * 4); + ROIAlignLayerTest::fillCoordTensor(blobData, + inputShape[2], + inputShape[3], + node->get_spatial_scale(), + node->get_sampling_ratio(), + node->get_pooled_h(), + node->get_pooled_w()); + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), blobData.data(), blobData.size()); + } + case 2: { + std::vector roiIdxVector(node->get_shape()[0]); + ROIAlignLayerTest::fillIdxTensor(roiIdxVector, node->get_shape()[0]); + return FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), roiIdxVector.data(), roiIdxVector.size()); + } + 
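+        // Port 0 is the feature-map input: plain random data is sufficient for it, so it is
+        // covered by the default blob filler below.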
+        default:
+            return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
+    }
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    if (port == 0) {
+        return FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 1, 0, 1000, 8234231);
+    }
+    return FuncTestUtils::createAndFillBlobFloatNormalDistribution(info.getTensorDesc(), 0.0f, 0.2f, 7235346);
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return ReduceOps::generate(node->get_reduction_axes(), info);
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return ReduceOps::generate(node->get_reduction_axes(), info);
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::BatchNormInference> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), 3, 0, 1);
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::GRUSequence> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    if (port == 2) {
+        unsigned int m_max_seq_len = 10;
+        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
+    }
+    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::Loop> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    auto tdesc = info.getTensorDesc();
+    auto blob = make_blob_with_precision(tdesc);
+    blob->allocate();
+
+    if (tdesc.getLayout() == InferenceEngine::SCALAR) {
+        auto scalar_1d = CommonTestUtils::make_reshape_view(blob, {1});
+        unsigned int max_iter_num = 10;
+        CommonTestUtils::fill_data_with_broadcast(scalar_1d, 0, {static_cast<float>(max_iter_num)});
+    } else {
+        int start_value = 7;
+        CommonTestUtils::fill_data_with_broadcast(blob, 0, {static_cast<float>(start_value)});
+    }
+    return blob;
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::LSTMSequence> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    if (port == 2) {
+        unsigned int m_max_seq_len = 10;
+        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
+    }
+    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::NonMaxSuppression> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    if (port == 1) {
+        InferenceEngine::Blob::Ptr blob;
+        blob = make_blob_with_precision(info.getTensorDesc());
+        blob->allocate();
+        CommonTestUtils::fill_data_random_float(blob, 1, 0, 1000);
+        return blob;
+    }
+    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr<ngraph::op::v5::RNNSequence> node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    if (port == 2) {
+        unsigned int m_max_seq_len = 10;
+        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), m_max_seq_len, 0);
+    }
+    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
+}
+
+InferenceEngine::Blob::Ptr generate(const std::shared_ptr node,
+                                    const InferenceEngine::InputInfo& info,
+                                    size_t port) {
+    return Activation::generate(info, node->get_input_element_type(0).is_signed(), -10, 20, 4);
+}
+
+template <typename T>
+InferenceEngine::Blob::Ptr generateInput(const std::shared_ptr<ngraph::Node> node,
+                                         const InferenceEngine::InputInfo& info,
+                                         size_t port) {
+    return generate(ngraph::as_type_ptr<T>(node), info, port);
+}
+} // namespace
+
+InputsMap getInputMap() {
+    static InputsMap inputsMap{
+#define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, generateInput<NAMESPACE::NAME>},
+    #include "ngraph/opsets/opset1_tbl.hpp"
+    #include "ngraph/opsets/opset2_tbl.hpp"
+    #include "ngraph/opsets/opset3_tbl.hpp"
+    #include "ngraph/opsets/opset4_tbl.hpp"
+    #include "ngraph/opsets/opset5_tbl.hpp"
+    #include "ngraph/opsets/opset6_tbl.hpp"
+#undef NGRAPH_OP
+    };
+    return inputsMap;
+}
+
+} // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp b/inference-engine/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp
new file mode 100644
index 00000000000..94f4074db35
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp
@@ -0,0 +1,89 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "functional_test_utils/core_config.hpp"
+
+#include "shared_test_classes/read_ir/read_ir.hpp"
+#include "shared_test_classes/read_ir/compare_results.hpp"
+#include "shared_test_classes/read_ir/generate_inputs.hpp"
+
+namespace LayerTestsDefinitions {
+std::string ReadIRTest::getTestCaseName(const testing::TestParamInfo<std::tuple<std::string, std::string>>& obj) {
+    std::string pathToModel, deviceName;
+    std::tie(pathToModel, deviceName) = obj.param;
+
+    std::ostringstream result;
+    result << "ModelPath=" << pathToModel << "_";
+    result << "TargetDevice=" << deviceName << "_";
+    return result.str();
+}
+
+void ReadIRTest::SetUp() {
+    std::tie(pathToModel, targetDevice) = this->GetParam();
+    cnnNetwork = getCore()->ReadNetwork(pathToModel);
+    function = cnnNetwork.getFunction();
+}
+
+void ReadIRTest::GenerateInputs() {
+    auto inputMap = getInputMap();
+    const auto& inputsInfo = executableNetwork.GetInputsInfo();
+    for (const auto& param : function->get_parameters()) {
+        const auto infoIt = inputsInfo.find(param->get_friendly_name());
+        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
+
+        const auto& info = infoIt->second;
+        for (size_t i = 0; i < param->get_output_size(); i++) {
+            for (const auto& node : param->get_output_target_inputs(i)) {
+                const auto nodePtr = node.get_node()->shared_from_this();
+                auto it = inputMap.find(nodePtr->get_type_info());
+                for (size_t port = 0; port < nodePtr->get_input_size(); ++port) {
+                    if (nodePtr->get_input_node_ptr(port)->shared_from_this() == param->shared_from_this()) {
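+                        // This consumer's input port is fed by the current Parameter, so generate a blob for it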
+                        inputs.push_back(it->second(nodePtr, *info, port));
+                    }
+                }
+            }
+        }
+    }
+}
+
+void ReadIRTest::Compare(const std::vector<std::vector<std::uint8_t>> &expected,
+                         const std::vector<InferenceEngine::Blob::Ptr> &actual) {
+    auto compareMap = getCompareMap();
+    for (const auto& result : function->get_results()) {
+        for (size_t i = 0; i < result->get_input_size(); ++i) {
+            const auto inputNode = result->get_input_node_shared_ptr(i);
+            auto it = compareMap.find(inputNode->get_type_info());
+            it->second(inputNode, expected, actual, threshold);
+        }
+    }
+}
+
+std::vector<InferenceEngine::Blob::Ptr> ReadIRTest::GetOutputs() {
+    std::vector<InferenceEngine::Blob::Ptr> outputs;
+//    CNNNetworkNGraphImpl::getOVNameForTensor works incorrectly: _tensorNames is empty
+//    for (const auto& result : function->get_results()) {
+//        outputs.push_back(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(result->get_friendly_name())));
+//    }
+
+    for (const auto& result : function->get_results()) {
+        for (size_t inPort = 0; inPort < result->get_input_size(); ++inPort) {
+            const auto& inputNode = result->get_input_node_shared_ptr(inPort);
+            for (size_t outPort = 0; outPort < inputNode->get_output_size(); ++outPort) {
+                for (const auto& out : inputNode->get_output_target_inputs(outPort)) {
+                    if (out.get_node()->shared_from_this() == result) {
+                        std::string name = inputNode->get_friendly_name();
+                        if (inputNode->get_output_size() > 1) {
+                            name += "." + std::to_string(outPort);
+                        }
+                        outputs.push_back(inferRequest.GetBlob(name));
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    return outputs;
+}
+} // namespace LayerTestsDefinitions
+
diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
index 497de165528..24acd96dfe7 100644
--- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
+++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
@@ -42,7 +42,7 @@ static int randInt(int low, int high) {
     return dis(gen);
 }
 
-static void fillROITensor(float* buffer, int numROIs, int batchSize,
+void PSROIPoolingLayerTest::fillROITensor(float* buffer, int numROIs, int batchSize,
                           int height, int width, int groupSize, float spatialScale,
                           int spatialBinsX, int spatialBinsY, const std::string& mode) {
     int minRoiWidth = groupSize;
@@ -61,8 +61,8 @@ static void fillROITensor(float* buffer, int numROIs, int batchSize,
     }
     int batchId = 0;
     for (int i = 0; i < numROIs; i++) {
-        int sizeX = std::min(width, randInt(minRoiWidth, maxRoiWidth));
-        int sizeY = std::min(height, randInt(minRoiHeight, maxRoiHeight));
+        int sizeX = std::min(width, randInt(std::min(minRoiWidth, maxRoiWidth), std::max(minRoiWidth, maxRoiWidth)));
+        int sizeY = std::min(height, randInt(std::min(minRoiHeight, maxRoiHeight), std::max(minRoiHeight, maxRoiHeight)));
 
         int startX = randInt(0, std::max(1, width - sizeX - 1));
         int startY = randInt(0, std::max(1, height - sizeY - 1));
diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp
index 53dcb7226ed..dc0595dcaa7 100644
--- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp
+++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp
@@ -47,8 +47,8 @@ static int randInt(int low, int high) {
     return dis(gen);
 }
 
-static void fillCoordTensor(std::vector<float>& coords, int height, int width,
-                            float spatialScale, int pooledRatio, int pooledH, int pooledW) {
+void ROIAlignLayerTest::fillCoordTensor(std::vector<float>& coords, int height, int width,
+                                        float spatialScale, int pooledRatio, int pooledH, int pooledW) {
     int minRoiWidth = pooledW;
     int maxRoiWidth = width / pooledRatio;
     int minRoiHeight = pooledH;
@@ -66,7 +66,7 @@ static void fillCoordTensor(std::vector<float>& coords, int height, int width,
         coords[i * 4 + 3] = (startY + sizeY - 1) / spatialScale;
     }
 }
-static void fillIdxTensor(std::vector<int>& idx, int batchSize) {
+void ROIAlignLayerTest::fillIdxTensor(std::vector<int>& idx, int batchSize) {
     int batchId = 0;
     for (int i = 0; i < idx.size(); i++) {
         idx[i] = batchId;
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp
index e26f940fbd5..326a921d582 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp
@@ -323,8 +323,8 @@ void inline fill_data_normal_random_float(InferenceEngine::Blob::Ptr &blob,
     }
 }
 
-template<InferenceEngine::Precision::ePrecision PRC>
-void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const float values[], const size_t size) {
+template<InferenceEngine::Precision::ePrecision PRC, typename T>
+void inline fill_data_float_array(InferenceEngine::Blob::Ptr &blob, const T values[], const size_t size) {
     using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
     auto *rawBlobDataPtr = blob->buffer().as<dataType *>();
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/file_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/file_utils.hpp
index 81dcdbae97d..9747638aa64 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/file_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/file_utils.hpp
@@ -3,6 +3,7 @@
 //
 
 #pragma once
+#include <regex>
 #include
 #include
 #include
@@ -116,6 +117,7 @@ inline bool directoryExists(const std::string &path) {
     return false;
 }
 
+
 inline void directoryFileListRecursive(const std::string& name, std::vector<std::string>& file_list) {
     struct CloseDir {
         void operator()(DIR* d) const noexcept {
@@ -171,6 +173,31 @@ inline int createDirectoryRecursive(const std::string& dirPath) {
     return 0;
 }
 
+inline std::vector<std::string> getFileListByPatternRecursive(const std::vector<std::string>& folderPaths,
+                                                              const std::regex& pattern) {
+    auto getFileListByPattern = [&pattern](const std::string& folderPath) {
+        std::vector<std::string> allFilePaths;
+        CommonTestUtils::directoryFileListRecursive(folderPath, allFilePaths);
+        std::set<std::string> result;
+        for (auto& filePath : allFilePaths) {
+            if (CommonTestUtils::fileExists(filePath) && std::regex_match(filePath, pattern)) {
+                result.insert(filePath);
+            }
+        }
+        return result;
+    };
+
+    std::vector<std::string> result;
+    for (auto &&folderPath : folderPaths) {
+        if (!CommonTestUtils::directoryExists(folderPath)) {
+            continue;
+        }
+        auto fileListByPattern = getFileListByPattern(folderPath);
+        result.insert(result.end(), fileListByPattern.begin(), fileListByPattern.end());
+    }
+    return result;
+}
+
 inline std::string replaceExt(std::string file, const std::string& newExt) {
     std::string::size_type i = file.rfind('.', file.length());
diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
index 3ab8e8db0a3..bc7b6def87d 100644
--- a/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
+++ b/inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
@@ -1,4 +1,4 @@
-// Copyright (C) 2019 Intel Corporation
+// Copyright (C) 2019-2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
 #pragma once
@@ -16,6 +16,8 @@
 const char DEVICE_MULTI[] = "MULTI";
 const char DEVICE_TEMPLATE[] = "TEMPLATE";
 const char DEVICE_HETERO[] = "HETERO";
 
+const char REPORT_FILENAME[] = "report.xml";
+
 #ifdef _WIN32
 #ifdef __MINGW32__
 const char pre[] = "lib";
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
index 73f445b9d42..2f7c89459d4 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
@@ -436,13 +436,14 @@ inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::
     return blob;
 }
 
+template<typename T>
 inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td,
-                                                                  const float values[],
+                                                                  const T values[],
                                                                   const int size) {
     InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
     blob->allocate();
     switch (td.getPrecision()) {
-#define CASE(X) case X: CommonTestUtils::fill_data_float_array<X>(blob, values, size); break;
+#define CASE(X) case X: CommonTestUtils::fill_data_float_array<X, T>(blob, values, size); break;
         CASE(InferenceEngine::Precision::FP32)
         CASE(InferenceEngine::Precision::FP16)
         CASE(InferenceEngine::Precision::U8)
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/template/report_template.html b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/template/report_template.html
index e95f74d12a7..7b4018a7803 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/template/report_template.html
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/template/report_template.html
@@ -46,7 +46,7 @@
                 {% if op in results[d] -%}
                     {{ results[d][op].passrate }}%
                     (p:{{ results[d][op].passed }},
-                    f:{{ results[d][op].failed }},s:{{ results[d][op].skipped }})
+                    f:{{ results[d][op].failed }},s:{{ results[d][op].skipped }},c:{{ results[d][op].crashed }})
                 {% else -%}
                     No tests
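
Usage sketch (illustrative, not part of the patch): how a test can resolve a per-operation generator from the `InputsMap` built by `getInputMap()`. The `generateFor` helper name and the fallback branch are assumptions; the patch itself dispatches directly in `ReadIRTest::GenerateInputs`.

```cpp
// Illustrative only: look up the input generator registered for this node's type.
// Assumes the map keys are the ngraph type_info entries registered via NGRAPH_OP above.
InferenceEngine::Blob::Ptr generateFor(const std::shared_ptr<ngraph::Node>& node,
                                       const InferenceEngine::InputInfo& info,
                                       size_t port) {
    auto inputMap = LayerTestsDefinitions::getInputMap();
    auto it = inputMap.find(node->get_type_info());
    if (it == inputMap.end()) {
        // No dedicated generator registered for this op: fall back to default random data.
        return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
    }
    return it->second(node, info, port);
}
```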