Update classes func tests (#19663)
* Remove legacy classes from functional_test_utils
* Fixed code style
* Fixed build all for macOS
* Suppress warning
* Revert old functions for internal plugins

parent 7becaf8494
commit 3be8b58d2a
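Most of the diff below is one recurring substitution: helpers from the legacy `FuncTestUtils` sub-namespaces are now reached through `ov::test::utils`, with snake_case names. Below is a minimal before/after sketch of that pattern; the fixture function and the `skip_tests_config.hpp` header path are illustrative assumptions, not taken from this commit, while the `test_model.hpp` include and the helper names come straight from the hunks.

```cpp
#include <string>

// Assumed header locations for the migrated helpers.
#include "functional_test_utils/skip_tests_config.hpp"
#include "functional_test_utils/test_model/test_model.hpp"

void example_set_up(const std::string& model_path, const std::string& weights_path) {
    // Old: FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()
    if (ov::test::utils::current_test_is_disabled()) {
        return;  // real tests call GTEST_SKIP() here
    }
    // Old: FuncTestUtils::TestModel::generateTestModel(model_path, weights_path)
    ov::test::utils::generate_test_model(model_path, weights_path);
}
```

The hunks below show this substitution applied across the functional test utilities and the test suites that consume them, plus the clang-format cleanup mentioned in the commit message.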
@@ -33,7 +33,7 @@ public:
auto prefix = ov::test::utils::generateTestFilePrefix();
modelName = prefix + modelName;
weightsName = prefix + weightsName;
FuncTestUtils::TestModel::generateTestModel(modelName, weightsName);
ov::test::utils::generate_test_model(modelName, weightsName);
}

void TearDown() override {
@@ -14,6 +14,7 @@
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/test_assertions.hpp"
#include "functional_test_utils/test_model/test_model.hpp"
#include "ie_extension.h"
#include "openvino/runtime/core.hpp"
#include "openvino/util/file_util.hpp"
#ifdef __GLIBC__
@@ -32,7 +33,7 @@ public:
auto prefix = ov::test::utils::generateTestFilePrefix();
modelName = prefix + modelName;
weightsName = prefix + weightsName;
FuncTestUtils::TestModel::generateTestModel(modelName, weightsName);
ov::test::utils::generate_test_model(modelName, weightsName);
}

void TearDown() override {
@@ -60,10 +61,12 @@ public:

void safeAddExtension(ov::Core& core) {
try {
OPENVINO_SUPPRESS_DEPRECATED_START
auto extension = std::make_shared<InferenceEngine::Extension>(
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(),
std::string("template_extension") + IE_BUILD_POSTFIX));
core.add_extension(extension);
OPENVINO_SUPPRESS_DEPRECATED_END
} catch (const ov::Exception& ex) {
ASSERT_STR_CONTAINS(ex.what(), "name: custom_opset. Opset");
}
@@ -57,7 +57,7 @@ inline std::string get_node_type(const std::shared_ptr<ov::Node>& node) {
}

static std::map<std::string, std::string> get_max_ops_versions() {
std::map<std::string, std::set<std::string>> unique_ops = FuncTestUtils::get_unique_ops();
std::map<std::string, std::set<std::string>> unique_ops = ov::test::utils::get_unique_ops();
std::map<std::string, std::string> max_ops_versions;

for (auto op_info : unique_ops) {
@@ -78,7 +78,7 @@ static std::map<std::string, std::string> get_last_opset_version_map() {
std::string opset_name = std::prev(opset_map.end())->first;
const ov::OpSet& opset = std::prev(opset_map.end())->second();
for (const auto& op : opset.get_type_info_set()) {
res[op.name] = FuncTestUtils::get_op_version(op.get_version());
res[op.name] = ov::test::utils::get_op_version(op.get_version());
}

return res;
@@ -93,7 +93,7 @@ inline size_t get_node_priority_by_version(const std::shared_ptr<ov::Node>& node
size_t priority = 1;
auto type_info = node->get_type_info();
if (max_ops_versions.count(type_info.name)) {
std::string version_id = FuncTestUtils::get_op_version(type_info.version_id);
std::string version_id = ov::test::utils::get_op_version(type_info.version_id);
if (version_id == max_ops_versions[type_info.name]) {
priority = 2;
if (version_id == last_opset_versions_map[type_info.name]) {
@@ -107,4 +107,4 @@ inline size_t get_node_priority_by_version(const std::shared_ptr<ov::Node>& node

} // namespace subgraph_dumper
} // namespace tools
} // namespace ov
} // namespace ov
@@ -90,7 +90,7 @@ static std::set<std::string> get_element_type_names() {
return result;
}

static auto unique_ops = FuncTestUtils::get_unique_ops();
static auto unique_ops = ov::test::utils::get_unique_ops();
static auto element_type_names = get_element_type_names();

inline std::string get_ref_path(const std::string& model_path) {
@@ -74,7 +74,7 @@ int main(int argc, char* argv[]) {
throw std::runtime_error("Using mutually exclusive arguments: --extend_report and --report_unique_name");
}

FuncTestUtils::SkipTestsConfig::disable_tests_skipping = FLAGS_disable_test_config;
ov::test::utils::disable_tests_skipping = FLAGS_disable_test_config;
ov::test::utils::OpSummary::setExtendReport(FLAGS_extend_report);
ov::test::utils::OpSummary::setExtractBody(FLAGS_extract_body);
ov::test::utils::OpSummary::setSaveReportWithUniqueName(FLAGS_report_unique_name);
@@ -136,7 +136,7 @@ void ReadIRTest::query_model() {
}
s.setDeviceName(targetDevice);

if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
if (ov::test::utils::current_test_is_disabled()) {
s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::SKIPPED, rel_influence_coef);
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
} else {
@@ -151,7 +151,7 @@ void HeteroSyntheticTest::SetUp() {
}

void HeteroSyntheticTest::TearDown() {
if (!FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
if (!ov::test::utils::current_test_is_disabled()) {
for (auto&& pluginName : _registredPlugins) {
PluginCache::get().ie()->UnregisterPlugin(pluginName);
}
@@ -194,7 +194,7 @@ TEST_P(HeteroSyntheticTest, someLayersToMajorPluginOthersToFallback) {
auto affinities = SetUpAffinity();
SCOPED_TRACE(affinities);
Run();
if (!FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
if (!ov::test::utils::current_test_is_disabled()) {
ASSERT_NE(nullptr, cnnNetwork.getFunction());
}
}
@@ -11,12 +11,12 @@
#include "set_device_name.hpp"

int main(int argc, char *argv[]) {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = false;
ov::test::utils::disable_tests_skipping = false;
bool print_custom_help = false;
std::string outputFolderPath(".");
for (int i = 0; i < argc; ++i) {
if (std::string(argv[i]) == "--disable_tests_skipping") {
FuncTestUtils::SkipTestsConfig::disable_tests_skipping = true;
ov::test::utils::disable_tests_skipping = true;
} else if (std::string(argv[i]) == "--extract_body") {
ov::test::utils::OpSummary::setExtractBody(true);
} else if (std::string(argv[i]) == "--help") {
@@ -24,7 +24,7 @@ LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) {
}

void LayerTestsCommon::Run() {
bool isCurrentTestDisabled = FuncTestUtils::SkipTestsConfig::currentTestIsDisabled();
bool isCurrentTestDisabled = ov::test::utils::current_test_is_disabled();

ov::test::utils::PassRate::Statuses status = isCurrentTestDisabled ?
ov::test::utils::PassRate::Statuses::SKIPPED :
@@ -43,7 +43,7 @@ std::ostream& operator <<(std::ostream& os, const InputShape& inputShape) {

void SubgraphBaseTest::run() {
is_reported = true;
bool isCurrentTestDisabled = FuncTestUtils::SkipTestsConfig::currentTestIsDisabled();
bool isCurrentTestDisabled = ov::test::utils::current_test_is_disabled();

ov::test::utils::PassRate::Statuses status = isCurrentTestDisabled ?
ov::test::utils::PassRate::Statuses::SKIPPED :
@@ -9,7 +9,7 @@
namespace ov {
namespace test {
void SnippetsTestsCommon::validateNumSubgraphs() {
bool isCurrentTestDisabled = FuncTestUtils::SkipTestsConfig::currentTestIsDisabled();
bool isCurrentTestDisabled = ov::test::utils::current_test_is_disabled();
if (isCurrentTestDisabled)
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;

@@ -28,7 +28,7 @@ std::string ConversionLayerTest::getTestCaseName(const testing::TestParamInfo<Co
}

void ConversionLayerTest::SetUp() {
if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
if (ov::test::utils::current_test_is_disabled()) {
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
}
ngraph::helpers::ConversionTypes conversionOpType;
@@ -9,6 +9,7 @@
#include "ngraph/opsets/opset7.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph/pass/low_latency.hpp"
#include "openvino/op/util/variable_context.hpp"
#include "shared_test_classes/single_layer/memory.hpp"

using namespace ngraph;
@@ -46,7 +47,7 @@ namespace LayerTestsDefinitions {
}

auto hostTensor = std::make_shared<HostTensor>(ngPrc, inputShape);
auto variable_context = VariableContext();
auto variable_context = ov::op::util::VariableContext();
auto variable_value = std::make_shared<VariableValue>(hostTensor);
variable_context.set_variable_value(function->get_variable_by_id("v0"), variable_value);
eval_context["VariableContext"] = variable_context;
@@ -66,7 +67,7 @@ namespace LayerTestsDefinitions {

auto &s = ov::test::utils::OpSummary::getInstance();
s.setDeviceName(targetDevice);
if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
if (ov::test::utils::current_test_is_disabled()) {
s.updateOPsStats(function, ov::test::utils::PassRate::Statuses::SKIPPED);
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
} else {
@@ -71,9 +71,7 @@ void MulConvFusion::SetUp() {
std::shared_ptr<ngraph::Node> conv;
if (conv_type == ngraph::opset8::Convolution::get_type_info_static()) {
weights = std::make_shared<ngraph::opset8::Multiply>(weights, mul_const);
OPENVINO_SUPPRESS_DEPRECATED_START
weights = ngraph::get_constant_from_source(weights);
OPENVINO_SUPPRESS_DEPRECATED_END
weights = ov::get_constant_from_source(weights);
ASSERT_NE(nullptr, weights);
conv = std::make_shared<ngraph::opset8::Convolution>(param, weights, strides, pad_begin, pad_end, strides);
} else if (conv_type == ngraph::opset8::GroupConvolution::get_type_info_static()) {
@@ -84,9 +82,7 @@ void MulConvFusion::SetUp() {
auto reshape = std::make_shared<ngraph::opset8::Reshape>(mul_const,
ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false);
weights = std::make_shared<ngraph::opset8::Multiply>(weights, reshape);
OPENVINO_SUPPRESS_DEPRECATED_START
weights = ngraph::get_constant_from_source(weights);
OPENVINO_SUPPRESS_DEPRECATED_END
weights = ov::get_constant_from_source(weights);
ASSERT_NE(nullptr, weights);
conv = std::make_shared<ngraph::opset8::GroupConvolution>(param, weights, strides, pad_begin, pad_end, strides);
} else if (conv_type == ngraph::opset8::ConvolutionBackpropData::get_type_info_static()) {
@@ -96,9 +92,7 @@ void MulConvFusion::SetUp() {
auto reshape = std::make_shared<ngraph::opset8::Reshape>(mul_const,
ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false);
weights = std::make_shared<ngraph::opset8::Multiply>(weights, reshape);
OPENVINO_SUPPRESS_DEPRECATED_START
weights = ngraph::get_constant_from_source(weights);
OPENVINO_SUPPRESS_DEPRECATED_END
weights = ov::get_constant_from_source(weights);
ASSERT_NE(nullptr, weights);
conv = std::make_shared<ngraph::opset8::ConvolutionBackpropData>(param, weights, strides, pad_begin, pad_end, strides);
} else if (conv_type == ngraph::opset8::GroupConvolutionBackpropData::get_type_info_static()) {
@@ -110,9 +104,7 @@ void MulConvFusion::SetUp() {
auto reshape = std::make_shared<ngraph::opset8::Reshape>(mul_const,
ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{const_shape.size()}, const_shape), false);
weights = std::make_shared<ngraph::opset8::Multiply>(weights, reshape);
OPENVINO_SUPPRESS_DEPRECATED_START
weights = ngraph::get_constant_from_source(weights);
OPENVINO_SUPPRESS_DEPRECATED_END
weights = ov::get_constant_from_source(weights);
ASSERT_NE(nullptr, weights);
conv = std::make_shared<ngraph::opset8::GroupConvolutionBackpropData>(param, weights, strides, pad_begin, pad_end, strides);
} else {
@@ -8,7 +8,7 @@ addIeTarget(
NAME ${TARGET_NAME}
TYPE STATIC
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
ADD_CPPLINT
ADD_CLANG_FORMAT
DEVELOPER_PACKAGE
tests
INCLUDES
@@ -4,31 +4,29 @@

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cmath>
#include <string>
#include <algorithm>
#include <vector>
#include <type_traits>
#include <vector>

#include <gtest/gtest.h>
#include "blob_factory.hpp"
#include "blob_transform.hpp"
#include "ie_compound_blob.h"
#include "precision_utils.h"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/test_constants.hpp"

#include "openvino/runtime/common.hpp"
#include "ie_compound_blob.h"
#include "ie_ngraph_utils.hpp"

#include "openvino/runtime/common.hpp"
#include "precision_utils.h"

namespace FuncTestUtils {
namespace Bf16TestUtils {
inline short reducePrecisionBitwiseS(const float in);
} // namespace Bf16TestUtils

enum CompareType{
enum CompareType {
ABS,
REL,
ABS_AND_REL // if absolute and relative differences are too high, an exception is thrown
@ -48,10 +46,14 @@ enum CompareType{
|
||||
* @param thr2 Second threshold of difference
|
||||
* @param printData A flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const dType *res, const dType *ref,
|
||||
size_t resSize, size_t refSize,
|
||||
CompareType compareType, float thr1 = 0.01, float thr2 = 0.01,
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const dType* res,
|
||||
const dType* ref,
|
||||
size_t resSize,
|
||||
size_t refSize,
|
||||
CompareType compareType,
|
||||
float thr1 = 0.01,
|
||||
float thr2 = 0.01,
|
||||
bool printData = false) {
|
||||
if (printData) {
|
||||
std::cout << "Reference results: " << std::endl;
|
||||
@ -67,31 +69,31 @@ inline void compareRawBuffers(const dType *res, const dType *ref,
|
||||
}
|
||||
|
||||
switch (compareType) {
|
||||
case CompareType::ABS:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
ASSERT_LE(absDiff, thr1) << "Relative comparison of values ref: " << ref[i] << " and res: "
|
||||
<< res[i] << " , index in blobs: " << i << " failed!";
|
||||
}
|
||||
break;
|
||||
case CompareType::REL:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
case CompareType::ABS:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
ASSERT_LE(absDiff, thr1) << "Relative comparison of values ref: " << ref[i] << " and res: " << res[i]
|
||||
<< " , index in blobs: " << i << " failed!";
|
||||
}
|
||||
break;
|
||||
case CompareType::REL:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
float relDiff = absDiff / std::max(res[i], ref[i]);
|
||||
ASSERT_LE(relDiff, thr2) << "Relative comparison of values ref: " << ref[i] << " and res: " << res[i]
|
||||
<< " , index in blobs: " << i << " failed!";
|
||||
}
|
||||
break;
|
||||
case CompareType::ABS_AND_REL:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
if (absDiff > thr1) {
|
||||
float relDiff = absDiff / std::max(res[i], ref[i]);
|
||||
ASSERT_LE(relDiff, thr2) << "Relative comparison of values ref: " << ref[i] << " and res: "
|
||||
<< res[i] << " , index in blobs: " << i << " failed!";
|
||||
ASSERT_LE(relDiff, thr2) << "Comparison of values ref: " << ref[i] << " and res: " << res[i]
|
||||
<< " , index in blobs: " << i << " failed!";
|
||||
}
|
||||
break;
|
||||
case CompareType::ABS_AND_REL:
|
||||
for (size_t i = 0; i < refSize; i++) {
|
||||
float absDiff = std::abs(res[i] - ref[i]);
|
||||
if (absDiff > thr1) {
|
||||
float relDiff = absDiff / std::max(res[i], ref[i]);
|
||||
ASSERT_LE(relDiff, thr2) << "Comparison of values ref: " << ref[i] << " and res: "
|
||||
<< res[i] << " , index in blobs: " << i << " failed!";
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
/**
|
||||
@ -105,9 +107,11 @@ inline void compareRawBuffers(const dType *res, const dType *ref,
|
||||
* @param thr Threshold of difference, absolute and relative simultaneously
|
||||
* @param printData Flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const dType *res, const dType *ref,
|
||||
size_t resSize, size_t refSize,
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const dType* res,
|
||||
const dType* ref,
|
||||
size_t resSize,
|
||||
size_t refSize,
|
||||
float thr = 0.01,
|
||||
bool printData = false) {
|
||||
compareRawBuffers(res, ref, resSize, refSize, CompareType::ABS_AND_REL, thr, thr, printData);
|
||||
@ -127,18 +131,24 @@ inline void compareRawBuffers(const dType *res, const dType *ref,
|
||||
* @param thr2 Second threshold of difference
|
||||
* @param printData A flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
|
||||
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType*> res,
|
||||
const std::vector<dType*> ref,
|
||||
const std::vector<size_t>& resSizes,
|
||||
const std::vector<size_t>& refSizes,
|
||||
CompareType compareType,
|
||||
float thr1 = 0.01, float thr2 = 0.01, bool printData = false) {
|
||||
float thr1 = 0.01,
|
||||
float thr2 = 0.01,
|
||||
bool printData = false) {
|
||||
ASSERT_TRUE(res.size() == ref.size()) << "Reference and Results vector have to be same length";
|
||||
ASSERT_TRUE(res.size() == resSizes.size()) << "Results vector and elements count vector have to be same length";
|
||||
ASSERT_TRUE(ref.size() == refSizes.size()) << "Reference vector and elements count vector have to be same length";
|
||||
for (size_t i = 0; i < res.size(); i++) {
|
||||
if (printData) std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl;
|
||||
if (printData)
|
||||
std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl;
|
||||
compareRawBuffers(res[i], ref[i], resSizes[i], refSizes[i], compareType, thr1, thr2, printData);
|
||||
if (printData) std::cout << "END CHECK BUFFER [" << i << "]" << std::endl;
|
||||
if (printData)
|
||||
std::cout << "END CHECK BUFFER [" << i << "]" << std::endl;
|
||||
}
|
||||
}
|
||||
/**
|
||||
@ -152,10 +162,13 @@ inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<
|
||||
* @param thr Threshold of difference, absolute and relative simultaneously
|
||||
* @param printData A flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<dType *> ref,
|
||||
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
|
||||
float thr = 0.01, bool printData = false) {
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType*> res,
|
||||
const std::vector<dType*> ref,
|
||||
const std::vector<size_t>& resSizes,
|
||||
const std::vector<size_t>& refSizes,
|
||||
float thr = 0.01,
|
||||
bool printData = false) {
|
||||
compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData);
|
||||
}
|
||||
/**
|
||||
@ -173,18 +186,24 @@ inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<
|
||||
* @param thr2 Second threshold of difference
|
||||
* @param printData A flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
|
||||
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType*> res,
|
||||
const std::vector<std::shared_ptr<dType*>> ref,
|
||||
const std::vector<size_t>& resSizes,
|
||||
const std::vector<size_t>& refSizes,
|
||||
CompareType compareType,
|
||||
float thr1 = 0.01, float thr2 = 0.01, bool printData = false) {
|
||||
float thr1 = 0.01,
|
||||
float thr2 = 0.01,
|
||||
bool printData = false) {
|
||||
ASSERT_TRUE(res.size() == ref.size()) << "Reference and Results vector have to be same length";
|
||||
ASSERT_TRUE(res.size() == resSizes.size()) << "Results vector and elements count vector have to be same length";
|
||||
ASSERT_TRUE(ref.size() == refSizes.size()) << "Reference vector and elements count vector have to be same length";
|
||||
for (size_t i = 0; i < res.size(); i++) {
|
||||
if (printData) std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl;
|
||||
if (printData)
|
||||
std::cout << "BEGIN CHECK BUFFER [" << i << "]" << std::endl;
|
||||
compareRawBuffers(res[i], *ref[i], resSizes[i], refSizes[i], compareType, thr1, thr2, printData);
|
||||
if (printData) std::cout << "END CHECK BUFFER [" << i << "]" << std::endl;
|
||||
if (printData)
|
||||
std::cout << "END CHECK BUFFER [" << i << "]" << std::endl;
|
||||
}
|
||||
}
|
||||
/**
|
||||
@ -198,22 +217,27 @@ inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<
|
||||
* @param thr Threshold of difference, absolute and relative simultaneously
|
||||
* @param printData A flag if data printing is demanded
|
||||
*/
|
||||
template<typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType *> res, const std::vector<std::shared_ptr<dType *>> ref,
|
||||
const std::vector<size_t> &resSizes, const std::vector<size_t> &refSizes,
|
||||
float thr = 0.01, bool printData = false) {
|
||||
template <typename dType>
|
||||
inline void compareRawBuffers(const std::vector<dType*> res,
|
||||
const std::vector<std::shared_ptr<dType*>> ref,
|
||||
const std::vector<size_t>& resSizes,
|
||||
const std::vector<size_t>& refSizes,
|
||||
float thr = 0.01,
|
||||
bool printData = false) {
|
||||
compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData);
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void
|
||||
compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01,
|
||||
const std::string &assertDetails = "", bool printData = false) {
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void compareBlobData(const InferenceEngine::Blob::Ptr& res,
|
||||
const InferenceEngine::Blob::Ptr& ref,
|
||||
float max_diff = 0.01,
|
||||
const std::string& assertDetails = "",
|
||||
bool printData = false) {
|
||||
using dataType = typename InferenceEngine::PrecisionTrait<PRC>::value_type;
|
||||
const dataType *res_ptr = res->cbuffer().as<dataType *>();
|
||||
const dataType* res_ptr = res->cbuffer().as<dataType*>();
|
||||
size_t res_size = res->byteSize();
|
||||
|
||||
const dataType *ref_ptr = ref->cbuffer().as<dataType *>();
|
||||
const dataType* ref_ptr = ref->cbuffer().as<dataType*>();
|
||||
size_t ref_size = ref->byteSize();
|
||||
|
||||
ASSERT_EQ(res_size, ref_size) << "Comparing blobs have different size. " << assertDetails;
|
||||
@ -231,25 +255,28 @@ compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Bl
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < ref_size / sizeof(dataType); i++) {
|
||||
auto resVal = PRC == InferenceEngine::Precision::FP16 ? InferenceEngine::PrecisionUtils::f16tof32(static_cast<InferenceEngine::ie_fp16>(res_ptr[i]))
|
||||
: static_cast<float>(res_ptr[i]);
|
||||
auto refVal = PRC == InferenceEngine::Precision::FP16 ? InferenceEngine::PrecisionUtils::f16tof32(static_cast<InferenceEngine::ie_fp16>(ref_ptr[i]))
|
||||
: static_cast<float>(ref_ptr[i]);
|
||||
auto resVal = PRC == InferenceEngine::Precision::FP16
|
||||
? InferenceEngine::PrecisionUtils::f16tof32(static_cast<InferenceEngine::ie_fp16>(res_ptr[i]))
|
||||
: static_cast<float>(res_ptr[i]);
|
||||
auto refVal = PRC == InferenceEngine::Precision::FP16
|
||||
? InferenceEngine::PrecisionUtils::f16tof32(static_cast<InferenceEngine::ie_fp16>(ref_ptr[i]))
|
||||
: static_cast<float>(ref_ptr[i]);
|
||||
float absDiff = std::abs(resVal - refVal);
|
||||
if (absDiff > max_diff) {
|
||||
float relDiff = absDiff / std::max(res_ptr[i], ref_ptr[i]);
|
||||
ASSERT_LE(relDiff, max_diff) << "Relative comparison of values ref: " << ref_ptr[i] << " and res: "
|
||||
<< res_ptr[i] << " , index in blobs: " << i << " failed!" << assertDetails;
|
||||
ASSERT_LE(relDiff, max_diff) << "Relative comparison of values ref: " << ref_ptr[i]
|
||||
<< " and res: " << res_ptr[i] << " , index in blobs: " << i << " failed!"
|
||||
<< assertDetails;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void
|
||||
compareBlobData(const std::vector<InferenceEngine::Blob::Ptr> &res, const std::vector<InferenceEngine::Blob::Ptr> &ref,
|
||||
float max_diff = 0.01,
|
||||
const std::string &assertDetails = "", bool printData = false) {
|
||||
template <InferenceEngine::Precision::ePrecision PRC>
|
||||
inline void compareBlobData(const std::vector<InferenceEngine::Blob::Ptr>& res,
|
||||
const std::vector<InferenceEngine::Blob::Ptr>& ref,
|
||||
float max_diff = 0.01,
|
||||
const std::string& assertDetails = "",
|
||||
bool printData = false) {
|
||||
IE_ASSERT(res.size() == ref.size()) << "Length of comparing and references blobs vector are not equal!"
|
||||
<< assertDetails;
|
||||
for (size_t i = 0; i < res.size(); i++) {
|
||||
@ -261,46 +288,47 @@ compareBlobData(const std::vector<InferenceEngine::Blob::Ptr> &res, const std::v
|
||||
}
|
||||
}
|
||||
|
||||
inline void
|
||||
compareBlobs(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01,
|
||||
const std::string &assertDetails = "", bool printData = false) {
|
||||
ASSERT_EQ(res->byteSize(), ref->byteSize()) << "Blobs have different byteSize(): "
|
||||
<< res->byteSize() << " and " << ref->byteSize();
|
||||
inline void compareBlobs(const InferenceEngine::Blob::Ptr& res,
|
||||
const InferenceEngine::Blob::Ptr& ref,
|
||||
float max_diff = 0.01,
|
||||
const std::string& assertDetails = "",
|
||||
bool printData = false) {
|
||||
ASSERT_EQ(res->byteSize(), ref->byteSize())
|
||||
<< "Blobs have different byteSize(): " << res->byteSize() << " and " << ref->byteSize();
|
||||
|
||||
ASSERT_EQ(res->getTensorDesc(), ref->getTensorDesc()) << "Blobs have different TensorDesc()";
|
||||
|
||||
switch (res->getTensorDesc().getPrecision()) {
|
||||
#define COMPARE_WITH_REF(TYPE) case TYPE: { \
|
||||
FuncTestUtils::compareBlobData<TYPE>(res, \
|
||||
ref, \
|
||||
max_diff, \
|
||||
assertDetails, \
|
||||
printData); break; }
|
||||
#define COMPARE_WITH_REF(TYPE) \
|
||||
case TYPE: { \
|
||||
FuncTestUtils::compareBlobData<TYPE>(res, ref, max_diff, assertDetails, printData); \
|
||||
break; \
|
||||
}
|
||||
COMPARE_WITH_REF(InferenceEngine::Precision::FP32);
|
||||
COMPARE_WITH_REF(InferenceEngine::Precision::FP16);
|
||||
COMPARE_WITH_REF(InferenceEngine::Precision::I64);
|
||||
#undef COMPARE_WITH_REF
|
||||
default:
|
||||
IE_THROW() << "Precision " << res->getTensorDesc().getPrecision().name()
|
||||
<< " is not covered by FuncTestUtils::compareBlobs() method";
|
||||
default:
|
||||
IE_THROW() << "Precision " << res->getTensorDesc().getPrecision().name()
|
||||
<< " is not covered by FuncTestUtils::compareBlobs() method";
|
||||
}
|
||||
}
|
||||
|
||||
inline void GetComparisonThreshold(InferenceEngine::Precision prc, float &absoluteThreshold, float &relativeThreshold) {
|
||||
inline void GetComparisonThreshold(InferenceEngine::Precision prc, float& absoluteThreshold, float& relativeThreshold) {
|
||||
switch (prc) {
|
||||
case InferenceEngine::Precision::FP32:
|
||||
absoluteThreshold = relativeThreshold = 1e-4f;
|
||||
break;
|
||||
case InferenceEngine::Precision::FP16:
|
||||
absoluteThreshold = relativeThreshold = 1e-2f;
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
case InferenceEngine::Precision::I8:
|
||||
case InferenceEngine::Precision::U8:
|
||||
absoluteThreshold = relativeThreshold = 1;
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Unhandled precision " << prc << " passed to the GetComparisonThreshold()";
|
||||
case InferenceEngine::Precision::FP32:
|
||||
absoluteThreshold = relativeThreshold = 1e-4f;
|
||||
break;
|
||||
case InferenceEngine::Precision::FP16:
|
||||
absoluteThreshold = relativeThreshold = 1e-2f;
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
case InferenceEngine::Precision::I8:
|
||||
case InferenceEngine::Precision::U8:
|
||||
absoluteThreshold = relativeThreshold = 1;
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Unhandled precision " << prc << " passed to the GetComparisonThreshold()";
|
||||
}
|
||||
}
|
||||
|
||||
@ -311,9 +339,9 @@ inline float GetComparisonThreshold(InferenceEngine::Precision prc) {
|
||||
}
|
||||
|
||||
// Copy from net_pass.h
|
||||
template<InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
|
||||
inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type *dst,
|
||||
const typename InferenceEngine::PrecisionTrait<PREC_FROM>::value_type *src,
|
||||
template <InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
|
||||
inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type* dst,
|
||||
const typename InferenceEngine::PrecisionTrait<PREC_FROM>::value_type* src,
|
||||
size_t nelem) {
|
||||
using dst_type = typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type;
|
||||
|
||||
@ -322,190 +350,82 @@ inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait<PREC_
|
||||
}
|
||||
}
|
||||
|
||||
template<>
|
||||
inline void
|
||||
convertArrayPrecision<InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32>(float *dst, const short *src,
|
||||
size_t nelem) {
|
||||
template <>
|
||||
inline void convertArrayPrecision<InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32>(float* dst,
|
||||
const short* src,
|
||||
size_t nelem) {
|
||||
InferenceEngine::PrecisionUtils::f16tof32Arrays(dst, src, nelem, 1.0f, 0.0f);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline void
|
||||
convertArrayPrecision<InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP32>(float *dst, const short *src,
|
||||
size_t nelem) {
|
||||
template <>
|
||||
inline void convertArrayPrecision<InferenceEngine::Precision::BF16, InferenceEngine::Precision::FP32>(float* dst,
|
||||
const short* src,
|
||||
size_t nelem) {
|
||||
auto srcBf16 = reinterpret_cast<const ngraph::bfloat16*>(src);
|
||||
for (size_t i = 0; i < nelem; i++) {
|
||||
dst[i] = static_cast<float>(srcBf16[i]);
|
||||
}
|
||||
}
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
|
||||
inline InferenceEngine::Blob::Ptr convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) {
|
||||
template <InferenceEngine::Precision::ePrecision PREC_FROM, InferenceEngine::Precision::ePrecision PREC_TO>
|
||||
inline InferenceEngine::Blob::Ptr convertBlobPrecision(const InferenceEngine::Blob::Ptr& blob) {
|
||||
using from_d_type = typename InferenceEngine::PrecisionTrait<PREC_FROM>::value_type;
|
||||
using to_d_type = typename InferenceEngine::PrecisionTrait<PREC_TO>::value_type;
|
||||
|
||||
auto tensor_desc = blob->getTensorDesc();
|
||||
InferenceEngine::Blob::Ptr new_blob = InferenceEngine::make_shared_blob<to_d_type>(
|
||||
InferenceEngine::TensorDesc{PREC_TO, tensor_desc.getDims(), tensor_desc.getLayout()});
|
||||
InferenceEngine::TensorDesc{PREC_TO, tensor_desc.getDims(), tensor_desc.getLayout()});
|
||||
new_blob->allocate();
|
||||
auto target = new_blob->buffer().as<to_d_type *>();
|
||||
auto source = blob->buffer().as<from_d_type *>();
|
||||
auto target = new_blob->buffer().as<to_d_type*>();
|
||||
auto source = blob->buffer().as<from_d_type*>();
|
||||
convertArrayPrecision<PREC_FROM, PREC_TO>(target, source, blob->size());
|
||||
return new_blob;
|
||||
}
|
||||
// Copy from net_pass.h
|
||||
|
||||
|
||||
template<InferenceEngine::Precision::ePrecision targetPRC>
|
||||
inline InferenceEngine::Blob::Ptr copyBlobWithCast(const InferenceEngine::Blob::Ptr &blob) {
|
||||
template <InferenceEngine::Precision::ePrecision targetPRC>
|
||||
inline InferenceEngine::Blob::Ptr copyBlobWithCast(const InferenceEngine::Blob::Ptr& blob) {
|
||||
InferenceEngine::Blob::Ptr newBlob;
|
||||
switch (blob->getTensorDesc().getPrecision()) {
|
||||
case InferenceEngine::Precision::FP32:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::FP32, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::FP16:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::FP16, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I16, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I8:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I8, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::U8:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::U8, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I32:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I32, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::BOOL:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::BOOL, targetPRC>(blob);
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Conversion from blob with precision " << blob->getTensorDesc().getPrecision().name()
|
||||
<< " not implemented yet!";
|
||||
case InferenceEngine::Precision::FP32:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::FP32, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::FP16:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::FP16, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I16:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I16, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I8:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I8, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::U8:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::U8, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::I32:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I32, targetPRC>(blob);
|
||||
break;
|
||||
case InferenceEngine::Precision::BOOL:
|
||||
newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::BOOL, targetPRC>(blob);
|
||||
break;
|
||||
default:
|
||||
IE_THROW() << "Conversion from blob with precision " << blob->getTensorDesc().getPrecision().name()
|
||||
<< " not implemented yet!";
|
||||
}
|
||||
return newBlob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc &td,
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc& td,
|
||||
const float mean,
|
||||
const float stddev,
|
||||
const int32_t seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_data_normal_random_float<X>(blob, mean, stddev, seed); break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::TensorDesc &td,
|
||||
const uint32_t range = 10,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int32_t seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_data_random_float<X>(blob, range, start_from, resolution, seed); break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td,
|
||||
const T values[],
|
||||
const int size) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_data_float_array<X, T>(blob, values, size); break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlob(const InferenceEngine::TensorDesc &td,
|
||||
const uint32_t range = 10,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_data_random<X>(blob, range, start_from, resolution, seed); break;
|
||||
CASE(InferenceEngine::Precision::FP64)
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::BF16)
|
||||
CASE(InferenceEngine::Precision::U4)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U32)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::U64)
|
||||
CASE(InferenceEngine::Precision::I4)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently(
|
||||
const InferenceEngine::TensorDesc &td,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_data_consistently<X>(blob, range, start_from, resolution); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_data_normal_random_float<X>(blob, mean, stddev, seed); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
@ -523,17 +443,140 @@ inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently(
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobUniqueSequence(
|
||||
const InferenceEngine::TensorDesc &td,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int32_t seed = 1) {
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::TensorDesc& td,
|
||||
const uint32_t range = 10,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int32_t seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_data_random_float<X>(blob, range, start_from, resolution, seed); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc& td,
|
||||
const T values[],
|
||||
const int size) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_data_float_array<X, T>(blob, values, size); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlob(const InferenceEngine::TensorDesc& td,
|
||||
const uint32_t range = 10,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_data_random<X>(blob, range, start_from, resolution, seed); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP64)
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::BF16)
|
||||
CASE(InferenceEngine::Precision::U4)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U32)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::U64)
|
||||
CASE(InferenceEngine::Precision::I4)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently(const InferenceEngine::TensorDesc& td,
|
||||
const uint32_t range,
|
||||
const int32_t start_from,
|
||||
const int32_t resolution) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_data_consistently<X>(blob, range, start_from, resolution); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
CASE(InferenceEngine::Precision::U16)
|
||||
CASE(InferenceEngine::Precision::I8)
|
||||
CASE(InferenceEngine::Precision::I16)
|
||||
CASE(InferenceEngine::Precision::I64)
|
||||
CASE(InferenceEngine::Precision::BIN)
|
||||
CASE(InferenceEngine::Precision::I32)
|
||||
CASE(InferenceEngine::Precision::BOOL)
|
||||
#undef CASE
|
||||
default:
|
||||
IE_THROW() << "Wrong precision specified: " << td.getPrecision().name();
|
||||
}
|
||||
return blob;
|
||||
}
|
||||
|
||||
inline InferenceEngine::Blob::Ptr createAndFillBlobUniqueSequence(const InferenceEngine::TensorDesc& td,
|
||||
const int32_t start_from = 0,
|
||||
const int32_t resolution = 1,
|
||||
const int32_t seed = 1) {
|
||||
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td);
|
||||
blob->allocate();
|
||||
auto shape = td.getDims();
|
||||
auto range = std::accumulate(begin(shape), end(shape), uint64_t(1), std::multiplies<uint64_t>()) * 2;
|
||||
switch (td.getPrecision()) {
|
||||
#define CASE(X) case X: ov::test::utils::fill_random_unique_sequence<X>(blob, range, start_from, resolution, seed); break;
|
||||
#define CASE(X) \
|
||||
case X: \
|
||||
ov::test::utils::fill_random_unique_sequence<X>(blob, range, start_from, resolution, seed); \
|
||||
break;
|
||||
CASE(InferenceEngine::Precision::FP32)
|
||||
CASE(InferenceEngine::Precision::FP16)
|
||||
CASE(InferenceEngine::Precision::U8)
|
||||
@ -569,7 +612,7 @@ inline InferenceEngine::Blob::Ptr convertBlobLayout(const InferenceEngine::Blob:
|
||||
return out;
|
||||
}
|
||||
|
||||
template<typename dType>
|
||||
template <typename dType>
|
||||
inline void fillInputsBySinValues(dType* data, size_t size) {
|
||||
if (std::is_same<dType, float>::value) {
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
@ -598,9 +641,9 @@ inline int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) {
|
||||
namespace Bf16TestUtils {
|
||||
|
||||
#if defined __GNUC__
|
||||
# pragma GCC diagnostic push
|
||||
# pragma GCC diagnostic ignored "-Wstrict-aliasing"
|
||||
# pragma GCC diagnostic ignored "-Wuninitialized"
|
||||
# pragma GCC diagnostic push
|
||||
# pragma GCC diagnostic ignored "-Wstrict-aliasing"
|
||||
# pragma GCC diagnostic ignored "-Wuninitialized"
|
||||
#endif
|
||||
|
||||
inline float reducePrecisionBitwise(const float in) {
|
||||
@ -625,7 +668,7 @@ inline short reducePrecisionBitwiseS(const float in) {
|
||||
}
|
||||
|
||||
#if defined __GNUC__
|
||||
# pragma GCC diagnostic pop
|
||||
# pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
} // namespace Bf16TestUtils
|
||||
@ -666,55 +709,56 @@ inline InferenceEngine::Blob::Ptr createBlobByType(const InferenceEngine::Tensor
|
||||
for (size_t i = 0; i < subBlobsNum; i++) {
|
||||
subBlobs.push_back(createAndFillBlob(subBlobDesc));
|
||||
}
|
||||
return blobType == BlobType::Batched ? InferenceEngine::make_shared_blob<InferenceEngine::BatchedBlob>(subBlobs) :
|
||||
InferenceEngine::make_shared_blob<InferenceEngine::CompoundBlob>(subBlobs);
|
||||
return blobType == BlobType::Batched
|
||||
? InferenceEngine::make_shared_blob<InferenceEngine::BatchedBlob>(subBlobs)
|
||||
: InferenceEngine::make_shared_blob<InferenceEngine::CompoundBlob>(subBlobs);
|
||||
}
|
||||
// TODO: ocl + remote
|
||||
// case BlobType::Remote:
|
||||
// return InferenceEngine::as<InferenceEngine::RemoteBlob>(createAndFillBlob(td));
|
||||
// TODO: ocl + remote
|
||||
// case BlobType::Remote:
|
||||
// return InferenceEngine::as<InferenceEngine::RemoteBlob>(createAndFillBlob(td));
|
||||
default:
|
||||
IE_THROW() << "Test does not support the blob kind";
|
||||
}
|
||||
}
|
||||
|
||||
inline bool checkLayout(InferenceEngine::Layout layout, const std::vector<size_t> &inputShapes) {
|
||||
inline bool checkLayout(InferenceEngine::Layout layout, const std::vector<size_t>& inputShapes) {
|
||||
bool check = false;
|
||||
switch (layout) {
|
||||
case InferenceEngine::Layout::SCALAR:
|
||||
check = inputShapes.size() == 0;
|
||||
break;
|
||||
case InferenceEngine::Layout::C:
|
||||
check = 1 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::BLOCKED:
|
||||
case InferenceEngine::Layout::ANY:
|
||||
check = true;
|
||||
break;
|
||||
case InferenceEngine::Layout::GOIDHW:
|
||||
check = 6 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::NCDHW:
|
||||
case InferenceEngine::Layout::NDHWC:
|
||||
case InferenceEngine::Layout::OIDHW:
|
||||
case InferenceEngine::Layout::GOIHW:
|
||||
check = 5 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::OIHW:
|
||||
case InferenceEngine::Layout::NCHW:
|
||||
case InferenceEngine::Layout::NHWC:
|
||||
check = 4 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::CHW:
|
||||
case InferenceEngine::Layout::HWC:
|
||||
check = 3 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::CN:
|
||||
case InferenceEngine::Layout::NC:
|
||||
case InferenceEngine::Layout::HW:
|
||||
check = 2 == inputShapes.size();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
case InferenceEngine::Layout::SCALAR:
|
||||
check = inputShapes.size() == 0;
|
||||
break;
|
||||
case InferenceEngine::Layout::C:
|
||||
check = 1 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::BLOCKED:
|
||||
case InferenceEngine::Layout::ANY:
|
||||
check = true;
|
||||
break;
|
||||
case InferenceEngine::Layout::GOIDHW:
|
||||
check = 6 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::NCDHW:
|
||||
case InferenceEngine::Layout::NDHWC:
|
||||
case InferenceEngine::Layout::OIDHW:
|
||||
case InferenceEngine::Layout::GOIHW:
|
||||
check = 5 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::OIHW:
|
||||
case InferenceEngine::Layout::NCHW:
|
||||
case InferenceEngine::Layout::NHWC:
|
||||
check = 4 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::CHW:
|
||||
case InferenceEngine::Layout::HWC:
|
||||
check = 3 == inputShapes.size();
|
||||
break;
|
||||
case InferenceEngine::Layout::CN:
|
||||
case InferenceEngine::Layout::NC:
|
||||
case InferenceEngine::Layout::HW:
|
||||
check = 2 == inputShapes.size();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return check;
|
||||
}
|
||||
|
@@ -5,12 +5,11 @@
#pragma once

#include <gtest/gtest.h>
#include <setjmp.h>
#include <signal.h>

#include "common_test_utils/common_utils.hpp"

#include <signal.h>
#include <setjmp.h>

namespace ov {
namespace test {
namespace utils {
@@ -24,6 +23,7 @@ class CrashHandler {
private:
static unsigned int MAX_TEST_WORK_TIME;
static bool IGNORE_CRASH;

public:
CrashHandler(CONFORMANCE_TYPE type = CONFORMANCE_TYPE::op);
~CrashHandler();
@@ -7,7 +7,9 @@

#include "openvino/opsets/opset.hpp"

namespace FuncTestUtils {
namespace ov {
namespace test {
namespace utils {

inline std::string get_op_version(std::string version_full_name) {
std::string op_version(version_full_name);
@@ -38,4 +40,6 @@ static std::map<std::string, std::set<std::string>> get_unique_ops() {
return res;
}

} // namespace FuncTestUtils
} // namespace utils
} // namespace test
} // namespace ov
@@ -16,15 +16,15 @@ namespace utils {

class PluginCache {
public:
std::shared_ptr<ov::Core> core(const std::string &deviceToCheck = std::string());
std::shared_ptr<ov::Core> core(const std::string& deviceToCheck = std::string());

static PluginCache &get();
static PluginCache& get();

void reset();

PluginCache(const PluginCache &) = delete;
PluginCache(const PluginCache&) = delete;

PluginCache &operator=(const PluginCache &) = delete;
PluginCache& operator=(const PluginCache&) = delete;

private:
PluginCache();
@@ -4,15 +4,14 @@

#pragma once

#include <ie_core.hpp>
#include <memory>
#include <mutex>
#include <string>

#include <ie_core.hpp>

class PluginCache {
public:
std::shared_ptr<InferenceEngine::Core> ie(const std::string &deviceToCheck = std::string());
std::shared_ptr<InferenceEngine::Core> ie(const std::string& deviceToCheck = std::string());

static PluginCache& get();

@@ -4,9 +4,8 @@

#pragma once

#include <string>

#include <ngraph/type/element_type.hpp>
#include <string>

#include "ie_precision.hpp"

@ -17,45 +16,45 @@ namespace PrecisionUtils {
|
||||
inline ::ngraph::element::Type convertIE2nGraphPrc(const InferenceEngine::Precision& precision) {
|
||||
InferenceEngine::Precision::ePrecision pType = precision;
|
||||
switch (pType) {
|
||||
case InferenceEngine::Precision::UNSPECIFIED:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::undefined);
|
||||
case InferenceEngine::Precision::FP64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f64);
|
||||
case InferenceEngine::Precision::FP32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f32);
|
||||
case InferenceEngine::Precision::FP16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f16);
|
||||
case InferenceEngine::Precision::BF16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::bf16);
|
||||
case InferenceEngine::Precision::U4:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u4);
|
||||
case InferenceEngine::Precision::I4:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i4);
|
||||
case InferenceEngine::Precision::U8:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u8);
|
||||
case InferenceEngine::Precision::I8:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i8);
|
||||
case InferenceEngine::Precision::U16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u16);
|
||||
case InferenceEngine::Precision::I16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i16);
|
||||
case InferenceEngine::Precision::U32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u32);
|
||||
case InferenceEngine::Precision::I32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i32);
|
||||
case InferenceEngine::Precision::I64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i64);
|
||||
case InferenceEngine::Precision::U64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u64);
|
||||
case InferenceEngine::Precision::BOOL:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::boolean);
|
||||
case InferenceEngine::Precision::BIN:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u1);
|
||||
case InferenceEngine::Precision::Q78:
|
||||
case InferenceEngine::Precision::MIXED:
|
||||
case InferenceEngine::Precision::CUSTOM:
|
||||
default:
|
||||
IE_THROW() << "Incorrect precision!";
|
||||
case InferenceEngine::Precision::UNSPECIFIED:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::undefined);
|
||||
case InferenceEngine::Precision::FP64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f64);
|
||||
case InferenceEngine::Precision::FP32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f32);
|
||||
case InferenceEngine::Precision::FP16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::f16);
|
||||
case InferenceEngine::Precision::BF16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::bf16);
|
||||
case InferenceEngine::Precision::U4:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u4);
|
||||
case InferenceEngine::Precision::I4:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i4);
|
||||
case InferenceEngine::Precision::U8:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u8);
|
||||
case InferenceEngine::Precision::I8:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i8);
|
||||
case InferenceEngine::Precision::U16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u16);
|
||||
case InferenceEngine::Precision::I16:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i16);
|
||||
case InferenceEngine::Precision::U32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u32);
|
||||
case InferenceEngine::Precision::I32:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i32);
|
||||
case InferenceEngine::Precision::I64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::i64);
|
||||
case InferenceEngine::Precision::U64:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u64);
|
||||
case InferenceEngine::Precision::BOOL:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::boolean);
|
||||
case InferenceEngine::Precision::BIN:
|
||||
return ::ngraph::element::Type(::ngraph::element::Type_t::u1);
|
||||
case InferenceEngine::Precision::Q78:
|
||||
case InferenceEngine::Precision::MIXED:
|
||||
case InferenceEngine::Precision::CUSTOM:
|
||||
default:
|
||||
IE_THROW() << "Incorrect precision!";
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4,28 +4,40 @@

#pragma once

#include <vector>
#include <string>
#include <regex>

#include <gtest/gtest.h>

#include <regex>
#include <string>
#include <vector>

std::vector<std::string> disabledTestPatterns();

namespace FuncTestUtils {
namespace SkipTestsConfig {
namespace ov {
namespace test {
namespace utils {

extern bool disable_tests_skipping;

bool currentTestIsDisabled();
bool current_test_is_disabled();

} // namespace utils
} // namespace test
} // namespace ov

// TODO: Remove after migration of internal components
namespace FuncTestUtils {
namespace SkipTestsConfig {

inline bool currentTestIsDisabled() {
return ov::test::utils::current_test_is_disabled();
}

} // namespace SkipTestsConfig
} // namespace FuncTestUtils

#define SKIP_IF_CURRENT_TEST_IS_DISABLED() \
{ \
if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) { \
GTEST_SKIP() << "Disabled test due to configuration" << std::endl; \
} \
}
#define SKIP_IF_CURRENT_TEST_IS_DISABLED() \
{ \
if (ov::test::utils::current_test_is_disabled()) { \
GTEST_SKIP() << "Disabled test due to configuration" << std::endl; \
} \
}
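
A minimal usage sketch (not part of this commit; the test name and the disabled pattern are hypothetical): a functional test places the macro at the top of its body, and the test binary provides disabledTestPatterns() with the regexes that ov::test::utils::current_test_is_disabled() matches against the full "Suite.Name" string.

    // Hypothetical example, assuming gtest and the skip_tests_config.hpp header above.
    #include <gtest/gtest.h>

    std::vector<std::string> disabledTestPatterns() {
        // Any test whose "Suite.Name" matches one of these regexes gets skipped.
        return {R"(.*NotSupportedOnTemplate.*)"};
    }

    TEST(ExampleFuncTests, NotSupportedOnTemplate) {
        SKIP_IF_CURRENT_TEST_IS_DISABLED()
        // The body runs only when the test name does not match a disabled pattern.
    }
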
@ -24,17 +24,17 @@ class ApiSummary;

class ApiSummaryDestroyer {
private:
ApiSummary *p_instance;
ApiSummary* p_instance;

public:
~ApiSummaryDestroyer();

void initialize(ApiSummary *p);
void initialize(ApiSummary* p);
};

class ApiSummary : public virtual Summary {
private:
static ApiSummary *p_instance;
static ApiSummary* p_instance;
std::map<ov_entity, std::map<std::string, PassRate>> apiStats;
static const std::map<ov_entity, std::string> apiInfo;
ov_entity getOvEntityByName(const std::string& name);

@ -45,14 +45,15 @@ protected:
friend class ApiSummaryDestroyer;

public:
static ApiSummary &getInstance();
static ApiSummary& getInstance();
inline void getStatisticFromReport(const std::string& filePath);
std::map<ov_entity, std::map<std::string, PassRate>> getApiStats() { return apiStats; }
std::map<ov_entity, std::map<std::string, PassRate>> getApiStats() {
return apiStats;
}
void updateStat(ov_entity, const std::string& device, PassRate::Statuses, double rel_influence_coef = 1);
void saveReport() override;
};

} // namespace utils
} // namespace test
} // namespace ov
@ -6,10 +6,8 @@

#include <gtest/gtest.h>

#include "ngraph/ngraph.hpp"

#include "functional_test_utils/summary/op_summary.hpp"
#include "functional_test_utils/summary/api_summary.hpp"
#include "functional_test_utils/summary/op_summary.hpp"

namespace ov {
namespace test {
@ -12,11 +12,11 @@ namespace functional {

// todo: reuse in summary
std::string get_node_version(const std::shared_ptr<ov::Node>& node, const std::string& postfix = "");

} // namespace functional
} // namespace test
} // namespace ov

// todo: remove these structure after remove old subgraphs dumper
namespace LayerTestsUtils {

@ -45,4 +45,4 @@ struct OPInfo {

OPInfo() = default;
};
} // namespace LayerTestsUtils
} // namespace LayerTestsUtils
@ -4,11 +4,10 @@

#pragma once

#include "summary.hpp"

#include "openvino/opsets/opset.hpp"
#include "openvino/openvino.hpp"
#include "openvino/opsets/opset.hpp"
#include "openvino/opsets/opset10.hpp"
#include "summary.hpp"

namespace ov {
namespace test {

@ -18,16 +17,17 @@ class OpSummary;

class OpSummaryDestroyer {
private:
OpSummary *p_instance;
OpSummary* p_instance;

public:
~OpSummaryDestroyer();

void initialize(OpSummary *p);
void initialize(OpSummary* p);
};

class OpSummary : public virtual Summary {
private:
static OpSummary *p_instance;
static OpSummary* p_instance;
static bool extractBody;
std::map<ov::NodeTypeInfo, PassRate> opsStats = {};

@ -39,21 +39,29 @@ protected:
friend class OpSummaryDestroyer;

public:
static OpSummary &getInstance();
static OpSummary& getInstance();

std::map<ov::NodeTypeInfo, PassRate> getOPsStats() { return opsStats; }
std::map<ov::NodeTypeInfo, PassRate> getOPsStats() {
return opsStats;
}

static void setExtractBody(bool val) { extractBody = val; }
static bool getExtractBody() { return extractBody; }
static void setExtractBody(bool val) {
extractBody = val;
}
static bool getExtractBody() {
return extractBody;
}

std::map<std::string, PassRate> getStatisticFromReport();
void saveReport() override;

void updateOPsStats(const std::shared_ptr<ov::Model> &model, const PassRate::Statuses &status, double rel_influence_coef = 1);
void updateOPsImplStatus(const std::shared_ptr<ov::Model> &model, const bool implStatus);
void updateOPsStats(const std::shared_ptr<ov::Model>& model,
const PassRate::Statuses& status,
double rel_influence_coef = 1);
void updateOPsImplStatus(const std::shared_ptr<ov::Model>& model, const bool implStatus);

void updateOPsStats(const ov::NodeTypeInfo &op, const PassRate::Statuses &status, double rel_influence_coef = 1);
void updateOPsImplStatus(const ov::NodeTypeInfo &op, const bool implStatus);
void updateOPsStats(const ov::NodeTypeInfo& op, const PassRate::Statuses& status, double rel_influence_coef = 1);
void updateOPsImplStatus(const ov::NodeTypeInfo& op, const bool implStatus);
};

} // namespace utils
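
A hedged sketch of how a test might record per-model operation statistics with the OpSummary API declared above; the helper name is made up and 'model' is assumed to be an ov::Model prepared elsewhere by the test.

    // Sketch only: uses OpSummary::getInstance(), updateOPsStats() and saveReport() as declared above.
    void report_model_passed(const std::shared_ptr<ov::Model>& model) {
        auto& summary = ov::test::utils::OpSummary::getInstance();
        summary.updateOPsStats(model, ov::test::utils::PassRate::Statuses::PASSED);
        summary.saveReport();  // writes or extends the per-op XML report
    }
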
@ -4,26 +4,18 @@

#pragma once

#include <map>
#include <fstream>
#include <map>

#include "openvino/openvino.hpp"

#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_constants.hpp"

namespace ov {
namespace test {
namespace utils {

struct PassRate {
enum Statuses {
PASSED,
FAILED,
SKIPPED,
CRASHED,
HANGED
};
enum Statuses { PASSED, FAILED, SKIPPED, CRASHED, HANGED };
unsigned long passed = 0;
unsigned long failed = 0;
unsigned long skipped = 0;

@ -36,7 +28,13 @@ struct PassRate {

PassRate() = default;

PassRate(unsigned long p, unsigned long f, unsigned long s, unsigned long c, unsigned long h, double rel_p = 0, double rel_a = 0);
PassRate(unsigned long p,
unsigned long f,
unsigned long s,
unsigned long c,
unsigned long h,
double rel_p = 0,
double rel_a = 0);

void setImplementationStatus(bool implStatus);

@ -57,7 +55,7 @@ protected:
static bool isHangReported;
static bool extendReport;
static bool saveReportWithUniqueName;
static const char *outputFolder;
static const char* outputFolder;

Summary() = default;

@ -68,13 +66,17 @@ public:

std::string getDeviceName() const;

// #define IE_TEST_DEBUG

#ifdef IE_TEST_DEBUG
void saveDebugReport(const char* className, const char* opName, unsigned long passed, unsigned long failed,
unsigned long skipped, unsigned long crashed, unsigned long hanged);
#endif //IE_TEST_DEBUG
void saveDebugReport(const char* className,
const char* opName,
unsigned long passed,
unsigned long failed,
unsigned long skipped,
unsigned long crashed,
unsigned long hanged);
#endif  // IE_TEST_DEBUG

virtual void saveReport() {}

@ -89,7 +91,7 @@ public:
static void setSaveReportTimeout(size_t val);
static size_t getSaveReportTimeout();

static void setOutputFolder(const std::string &val);
static void setOutputFolder(const std::string& val);
};

} // namespace utils
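
For illustration only, PassRate is just a bundle of counters filled via the constructor shown above; how the counters are weighted into the reported percentage is defined by getPassrate() in summary.cpp and is not reproduced here.

    // Illustrative values: 8 passed, 1 failed, 1 skipped, 0 crashed, 0 hanged.
    ov::test::utils::PassRate rate(8, 1, 1, 0, 0);
    // getPassrate() (used by the save-report code later in this commit) turns these counters into a percentage.
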
@ -7,25 +7,26 @@
#include <string>
#include <vector>

#include "inference_engine.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/type/element_type.hpp"

namespace FuncTestUtils {
namespace TestModel {
namespace ov {
namespace test {
namespace utils {

/**
* @brief generates IR files (XML and BIN files) with the test model.
* Passed reference vector is filled with CNN layers to validate after the network reading.
* @param modelPath used to serialize the generated network
* @param weightsPath used to serialize the generated weights
* @param netPrc precision of the generated network
* @param inputDims dims on the input layer of the generated network
* Passed reference vector is filled with OpenVINO operations to validate after the network reading.
* @param model_path used to serialize the generated network
* @param weights_path used to serialize the generated weights
* @param input_type input element type of the generated model
* @param input_shape dims on the input layer of the generated model
*/
void generateTestModel(const std::string &modelPath,
const std::string &weightsPath,
const InferenceEngine::Precision &netPrc = InferenceEngine::Precision::FP32,
const InferenceEngine::SizeVector &inputDims = {1, 3, 227, 227});
void generate_test_model(const std::string& model_path,
const std::string& weights_path,
const ov::element::Type& input_type = ov::element::f32,
const ov::PartialShape& input_shape = {1, 3, 227, 227});

const char incorrect_input_name[] = "incorrect_input_name";

} // namespace TestModel
} // namespace FuncTestUtils
} // namespace utils
} // namespace test
} // namespace ov
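
A hedged sketch of the new entry point: generate the default test IR on disk and read it back with the standard ov::Core reader. The file names and helper name are hypothetical.

    #include "functional_test_utils/test_model/test_model.hpp"
    #include "openvino/runtime/core.hpp"

    void make_and_read_test_ir() {
        const std::string xml = "example_test_model.xml";
        const std::string bin = "example_test_model.bin";
        ov::test::utils::generate_test_model(xml, bin);  // defaults: f32, {1, 3, 227, 227}
        ov::Core core;
        auto model = core.read_model(xml, bin);           // reads back the serialized test graph
    }
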
@ -2,27 +2,28 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "functional_test_utils/summary/op_summary.hpp"
#include "functional_test_utils/summary/api_summary.hpp"

#include "functional_test_utils/crash_handler.hpp"
#include <signal.h>

#include <limits.h>
#include <signal.h>

#include "functional_test_utils/summary/api_summary.hpp"
#include "functional_test_utils/summary/op_summary.hpp"

namespace ov {
namespace test {
namespace utils {

#if defined(__APPLE__)
typedef sig_t sighandler;
typedef sig_t sighandler;
#elif defined(_WIN32)
#ifdef __GNUC__
typedef __p_sig_fn_t sighandler;
# ifdef __GNUC__
typedef __p_sig_fn_t sighandler;
# else
typedef _crt_signal_t sighandler;
# endif
#else
typedef _crt_signal_t sighandler;
#endif
#else
typedef sighandler_t sighandler;
typedef sighandler_t sighandler;
#endif

// enviroment to restore in case of crash

@ -51,7 +52,7 @@ CrashHandler::CrashHandler(CONFORMANCE_TYPE type) {
#endif

if (!CrashHandler::IGNORE_CRASH) {
auto &s = ov::test::utils::OpSummary::getInstance();
auto& s = ov::test::utils::OpSummary::getInstance();
s.saveReport();
std::abort();
}

@ -73,7 +74,8 @@ CrashHandler::CrashHandler(CONFORMANCE_TYPE type) {

if (type == CONFORMANCE_TYPE::api) {
crashHandler = [](int errCode) {
std::cerr << "Unexpected application crash with code: " << errCode << ". Program will aborted." << std::endl;
std::cerr << "Unexpected application crash with code: " << errCode << ". Program will aborted."
<< std::endl;

// reset custom signal handler to avoid infinit loop
// if for some reasons sigsetjmp will not be available

@ -85,7 +87,7 @@ CrashHandler::CrashHandler(CONFORMANCE_TYPE type) {
signal(SIGFPE, SIG_DFL);
signal(SIGALRM, SIG_DFL);
#endif
auto &s = ov::test::utils::ApiSummary::getInstance();
auto& s = ov::test::utils::ApiSummary::getInstance();
s.saveReport();
std::abort();
};
@ -3,8 +3,6 @@
//

#include "functional_test_utils/ov_plugin_cache.hpp"
#include "common_test_utils/file_utils.hpp"
#include "openvino/util/file_util.hpp"

#include <gtest/gtest.h>

@ -12,13 +10,16 @@
#include <ie_plugin_config.hpp>
#include <unordered_map>

#include "common_test_utils/file_utils.hpp"
#include "openvino/util/file_util.hpp"

namespace ov {
namespace test {
namespace utils {
namespace {
class TestListener : public testing::EmptyTestEventListener {
public:
void OnTestEnd(const testing::TestInfo &testInfo) override {
void OnTestEnd(const testing::TestInfo& testInfo) override {
if (auto testResult = testInfo.result()) {
if (testResult->Failed()) {
PluginCache::get().reset();

@ -28,12 +29,12 @@ public:
};
} // namespace

PluginCache &PluginCache::get() {
PluginCache& PluginCache::get() {
static PluginCache instance;
return instance;
}

std::shared_ptr<ov::Core> PluginCache::core(const std::string &deviceToCheck) {
std::shared_ptr<ov::Core> PluginCache::core(const std::string& deviceToCheck) {
std::lock_guard<std::mutex> lock(g_mtx);
if (std::getenv("DISABLE_PLUGIN_CACHE") != nullptr) {
#ifndef NDEBUG

@ -58,7 +59,9 @@ std::shared_ptr<ov::Core> PluginCache::core(const std::string &deviceToCheck) {
try {
std::string pluginName = "openvino_template_plugin";
pluginName += IE_BUILD_POSTFIX;
ov_core->register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), "TEMPLATE");
ov_core->register_plugin(
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName),
"TEMPLATE");
} catch (...) {
}

@ -66,8 +69,7 @@ std::shared_ptr<ov::Core> PluginCache::core(const std::string &deviceToCheck) {
auto properties = ov_core->get_property(deviceToCheck, ov::supported_properties);

if (std::find(properties.begin(), properties.end(), ov::available_devices) != properties.end()) {
auto availableDevices =
ov_core->get_property(deviceToCheck, ov::available_devices);
auto availableDevices = ov_core->get_property(deviceToCheck, ov::available_devices);

if (availableDevices.empty()) {
std::cerr << "No available devices for " << deviceToCheck << std::endl;

@ -77,7 +79,7 @@ std::shared_ptr<ov::Core> PluginCache::core(const std::string &deviceToCheck) {
#ifndef NDEBUG
std::cout << "Available devices for " << deviceToCheck << ":" << std::endl;

for (const auto &device : availableDevices) {
for (const auto& device : availableDevices) {
std::cout << " " << device << std::endl;
}
#endif

@ -97,7 +99,7 @@ void PluginCache::reset() {
}

PluginCache::PluginCache() {
auto &listeners = testing::UnitTest::GetInstance()->listeners();
auto& listeners = testing::UnitTest::GetInstance()->listeners();
listeners.Append(new TestListener);
}
} // namespace utils
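
A hedged usage sketch: the cache registers the TEMPLATE plugin as shown above, so a test typically requests the shared ov::Core by that device name. 'model' stands for an ov::Model built by the test and is not defined here.

    auto core = ov::test::utils::PluginCache::get().core("TEMPLATE");
    auto compiled = core->compile_model(model, "TEMPLATE");  // reuses the cached ov::Core across tests
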
@ -2,22 +2,23 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "common_test_utils/test_constants.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "common_test_utils/file_utils.hpp"

#include <cstdlib>
#include <unordered_map>

#include <gtest/gtest.h>

#include <cstdlib>
#include <ie_plugin_config.hpp>
#include <unordered_map>

#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/test_constants.hpp"
#include "functional_test_utils/ov_plugin_cache.hpp"
#include "openvino/util/file_util.hpp"

namespace {
class TestListener : public testing::EmptyTestEventListener {
public:
void OnTestEnd(const testing::TestInfo &testInfo) override {
void OnTestEnd(const testing::TestInfo& testInfo) override {
if (auto testResult = testInfo.result()) {
if (testResult->Failed()) {
PluginCache::get().reset();

@ -27,12 +28,12 @@ public:
};
} // namespace

PluginCache &PluginCache::get() {
PluginCache& PluginCache::get() {
static PluginCache instance;
return instance;
}

std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &deviceToCheck) {
std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string& deviceToCheck) {
std::lock_guard<std::mutex> lock(g_mtx);
if (std::getenv("DISABLE_PLUGIN_CACHE") != nullptr) {
#ifndef NDEBUG

@ -57,8 +58,11 @@ std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &device
try {
std::string pluginName = "openvino_template_plugin";
pluginName += IE_BUILD_POSTFIX;
ie_core->RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName), "TEMPLATE");
} catch (...) {}
ie_core->RegisterPlugin(
ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), pluginName),
"TEMPLATE");
} catch (...) {
}

if (!deviceToCheck.empty()) {
std::vector<std::string> metrics;

@ -69,7 +73,8 @@ std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &device
metrics = {ie_core->GetMetric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS)).as<std::string>()};
}
if (std::find(metrics.begin(), metrics.end(), METRIC_KEY(AVAILABLE_DEVICES)) != metrics.end()) {
auto availableDevices = ie_core->GetMetric(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
auto availableDevices =
ie_core->GetMetric(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();

if (availableDevices.empty()) {
std::cerr << "No available devices for " << deviceToCheck << std::endl;

@ -79,7 +84,7 @@ std::shared_ptr<InferenceEngine::Core> PluginCache::ie(const std::string &device
#ifndef NDEBUG
std::cout << "Available devices for " << deviceToCheck << ":" << std::endl;

for (const auto &device : availableDevices) {
for (const auto& device : availableDevices) {
std::cout << " " << device << std::endl;
}
#endif

@ -99,6 +104,6 @@ void PluginCache::reset() {
}

PluginCache::PluginCache() {
auto &listeners = testing::UnitTest::GetInstance()->listeners();
auto& listeners = testing::UnitTest::GetInstance()->listeners();
listeners.Append(new TestListener);
}
@ -6,11 +6,12 @@

#include <gtest/gtest.h>

#include <ngraph/ngraph.hpp>
#include <ngraph/ops.hpp>
#include <ngraph/type/float16.hpp>

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iterator>

@ -28,8 +29,6 @@
#include <utility>
#include <vector>

#include <cassert>
#include <cctype>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "openvino/core/type/float16.hpp"
#include "openvino/op/ops.hpp"
#include "openvino/openvino.hpp"
@ -2,25 +2,27 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <iostream>
#include <fstream>

#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"

namespace FuncTestUtils {
namespace SkipTestsConfig {
#include <fstream>
#include <iostream>

#include "common_test_utils/file_utils.hpp"

namespace ov {
namespace test {
namespace utils {

bool disable_tests_skipping = false;

bool currentTestIsDisabled() {
bool current_test_is_disabled() {
if (disable_tests_skipping)
return false;

const auto fullName = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name()
+ std::string(".") + ::testing::UnitTest::GetInstance()->current_test_info()->name();
const auto fullName = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name() + std::string(".") +
::testing::UnitTest::GetInstance()->current_test_info()->name();

for (const auto &pattern : disabledTestPatterns()) {
for (const auto& pattern : disabledTestPatterns()) {
std::regex re(pattern);
if (std::regex_match(fullName, re))
return true;

@ -28,5 +30,7 @@ bool currentTestIsDisabled() {

return false;
}
} // namespace SkipTestsConfig
} // namespace FuncTestUtils

} // namespace utils
} // namespace test
} // namespace ov
@ -2,34 +2,35 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "functional_test_utils/summary/api_summary.hpp"

#include <pugixml.hpp>

#include "functional_test_utils/summary/api_summary.hpp"
#include "common_test_utils/file_utils.hpp"

using namespace ov::test::utils;

#ifdef _WIN32
# define getpid _getpid
# define getpid _getpid
#endif

ApiSummary *ApiSummary::p_instance = nullptr;
ApiSummary* ApiSummary::p_instance = nullptr;
ApiSummaryDestroyer ApiSummary::destroyer;
const std::map<ov_entity, std::string> ApiSummary::apiInfo({
{ ov_entity::ov_infer_request, "ov_infer_request"},
{ ov_entity::ov_plugin, "ov_plugin"},
{ ov_entity::ov_compiled_model, "ov_compiled_model"},
{ ov_entity::ie_infer_request, "ie_infer_request"},
{ ov_entity::ie_plugin, "ie_plugin"},
{ ov_entity::ie_executable_network, "ie_executable_network"},
{ ov_entity::undefined, "undefined"},
{ov_entity::ov_infer_request, "ov_infer_request"},
{ov_entity::ov_plugin, "ov_plugin"},
{ov_entity::ov_compiled_model, "ov_compiled_model"},
{ov_entity::ie_infer_request, "ie_infer_request"},
{ov_entity::ie_plugin, "ie_plugin"},
{ov_entity::ie_executable_network, "ie_executable_network"},
{ov_entity::undefined, "undefined"},
});

ApiSummaryDestroyer::~ApiSummaryDestroyer() {
delete p_instance;
}

void ApiSummaryDestroyer::initialize(ApiSummary *p) {
void ApiSummaryDestroyer::initialize(ApiSummary* p) {
p_instance = p;
}

@ -39,7 +40,7 @@ ApiSummary::ApiSummary() : apiStats() {
isHangReported = false;
}

ApiSummary &ApiSummary::getInstance() {
ApiSummary& ApiSummary::getInstance() {
if (!p_instance) {
p_instance = new ApiSummary();
destroyer.initialize(p_instance);

@ -47,9 +48,13 @@ ApiSummary &ApiSummary::getInstance() {
return *p_instance;
}

void ApiSummary::updateStat(ov_entity entity, const std::string& target_device, PassRate::Statuses status, double rel_influence_coef) {
void ApiSummary::updateStat(ov_entity entity,
const std::string& target_device,
PassRate::Statuses status,
double rel_influence_coef) {
if (apiStats.empty()) {
std::string outputFilePath = outputFolder + std::string(ov::test::utils::FileSeparator) + reportFilename + ov::test::utils::REPORT_EXTENSION;
std::string outputFilePath = outputFolder + std::string(ov::test::utils::FileSeparator) + reportFilename +
ov::test::utils::REPORT_EXTENSION;
const bool fileExists = ov::test::utils::fileExists(outputFilePath);
if (extendReport && !isReported && fileExists) {
getStatisticFromReport(outputFilePath);

@ -77,31 +82,31 @@ void ApiSummary::updateStat(ov_entity entity, const std::string& target_device,
return;
}
switch (status) {
case PassRate::Statuses::SKIPPED: {
cur_stat[real_device].skipped++;
break;
case PassRate::Statuses::SKIPPED: {
cur_stat[real_device].skipped++;
break;
}
case PassRate::Statuses::PASSED: {
if (!cur_stat[real_device].isImplemented) {
cur_stat[real_device].isImplemented = true;
}
case PassRate::Statuses::PASSED: {
if (!cur_stat[real_device].isImplemented) {
cur_stat[real_device].isImplemented = true;
}
cur_stat[real_device].passed++;
cur_stat[real_device].rel_passed += rel_influence_coef;
break;
}
case PassRate::Statuses::HANGED: {
cur_stat[real_device].hanged++;
isHangReported = true;
break;
}
case PassRate::Statuses::FAILED: {
cur_stat[real_device].failed++;
break;
}
case PassRate::Statuses::CRASHED:
cur_stat[real_device].crashed++;
isCrashReported = true;
break;
cur_stat[real_device].passed++;
cur_stat[real_device].rel_passed += rel_influence_coef;
break;
}
case PassRate::Statuses::HANGED: {
cur_stat[real_device].hanged++;
isHangReported = true;
break;
}
case PassRate::Statuses::FAILED: {
cur_stat[real_device].failed++;
break;
}
case PassRate::Statuses::CRASHED:
cur_stat[real_device].crashed++;
isCrashReported = true;
break;
}
}

@ -122,7 +127,7 @@ void ApiSummary::getStatisticFromReport(const std::string& filePath) {

pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.child(deviceName.c_str());
for (auto &entityNode : currentDeviceNode.children()) {
for (auto& entityNode : currentDeviceNode.children()) {
std::string entityName = entityNode.name();
ov_entity entity = getOvEntityByName(entityName);
for (const auto& realDeviceNode : entityNode.children()) {

@ -157,7 +162,7 @@ void ApiSummary::saveReport() {

std::string outputFilePath = outputFolder + std::string(ov::test::utils::FileSeparator) + filename;

auto &summary = ApiSummary::getInstance();
auto& summary = ApiSummary::getInstance();
auto stats = summary.getApiStats();

pugi::xml_document doc;

@ -165,12 +170,12 @@ void ApiSummary::saveReport() {
const bool fileExists = ov::test::utils::fileExists(outputFilePath);

time_t rawtime;
struct tm *timeinfo;
struct tm* timeinfo;
char timeNow[80];

time(&rawtime);
// cpplint require to use localtime_r instead which is not available in C++11
timeinfo = localtime(&rawtime); // NOLINT
timeinfo = localtime(&rawtime);  // NOLINT

strftime(timeNow, sizeof(timeNow), "%d-%m-%Y %H:%M:%S", timeinfo);

@ -178,7 +183,7 @@ void ApiSummary::saveReport() {
if (fileExists) {
doc.load_file(outputFilePath.c_str());
root = doc.child("report");
//Ugly but shorter than to write predicate for find_atrribute() to update existing one
// Ugly but shorter than to write predicate for find_atrribute() to update existing one
root.remove_attribute("timestamp");
root.append_attribute("timestamp").set_value(timeNow);

@ -191,16 +196,16 @@ void ApiSummary::saveReport() {
}

pugi::xml_node opsNode = root.append_child("api_list");
for (const auto &api : apiInfo) {
for (const auto& api : apiInfo) {
std::string name = api.second;
pugi::xml_node entry = opsNode.append_child(name.c_str());
(void) entry;
(void)entry;
}

pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.append_child(summary.deviceName.c_str());
std::unordered_set<std::string> opList;
for (const auto &stat_entity : stats) {
for (const auto& stat_entity : stats) {
pugi::xml_node currentEntity = currentDeviceNode.append_child(apiInfo.at(stat_entity.first).c_str());
for (const auto& stat_device : stat_entity.second) {
pugi::xml_node entry = currentEntity.append_child(stat_device.first.c_str());

@ -211,8 +216,10 @@ void ApiSummary::saveReport() {
entry.append_attribute("crashed").set_value(static_cast<unsigned long long>(stat_device.second.crashed));
entry.append_attribute("hanged").set_value(static_cast<unsigned long long>(stat_device.second.hanged));
entry.append_attribute("passrate").set_value(stat_device.second.getPassrate());
entry.append_attribute("relative_passed").set_value(static_cast<unsigned long long>(stat_device.second.rel_passed));
entry.append_attribute("relative_all").set_value(static_cast<unsigned long long>(stat_device.second.rel_all));
entry.append_attribute("relative_passed")
.set_value(static_cast<unsigned long long>(stat_device.second.rel_passed));
entry.append_attribute("relative_all")
.set_value(static_cast<unsigned long long>(stat_device.second.rel_all));
entry.append_attribute("relative_passrate").set_value(stat_device.second.getRelPassrate());
}
}

@ -230,4 +237,3 @@ void ApiSummary::saveReport() {
isReported = true;
}
}
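
A hedged sketch matching the updateStat() signature above; the entity, device name and status values are illustrative, not taken from this commit.

    ov::test::utils::ApiSummary::getInstance().updateStat(ov::test::utils::ov_entity::ov_plugin,
                                                          "TEMPLATE",
                                                          ov::test::utils::PassRate::Statuses::PASSED);
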
@ -15,7 +15,7 @@ std::string get_node_version(const std::shared_ptr<ov::Node>& node, const std::s
std::string opset_name = "opset";
auto pos = opset_version.find(opset_name);
if (pos != std::string::npos) {
op_name += "-" + opset_version.substr(pos + opset_name.size());
op_name += "-" + opset_version.substr(pos + opset_name.size());
}
if (!postfix.empty()) {
op_name += "_" + postfix;

@ -33,9 +33,10 @@ ModelInfo::ModelInfo(size_t _op_cnt, const std::map<std::string, size_t>& _model
: unique_op_cnt(_op_cnt),
model_paths(_model_paths) {}

PortInfo::PortInfo(double min, double max, bool convert_to_const) : min(min), max(max),
convert_to_const(convert_to_const) {}
PortInfo::PortInfo(double min, double max, bool convert_to_const)
: min(min),
max(max),
convert_to_const(convert_to_const) {}

PortInfo::PortInfo() {
min = std::numeric_limits<double>::min();

@ -48,4 +49,4 @@ OPInfo::OPInfo(const std::string& source_model, const std::string& model_path, s
ports_info = {};
}

} // namespace LayerTestsUtils
} // namespace LayerTestsUtils
@ -2,21 +2,20 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include "functional_test_utils/summary/op_summary.hpp"

#include <algorithm>
#include <pugixml.hpp>

#include "functional_test_utils/summary/op_summary.hpp"
#include "common_test_utils/file_utils.hpp"

using namespace ov::test::utils;

#ifdef _WIN32
# define getpid _getpid
# define getpid _getpid
#endif

OpSummary *OpSummary::p_instance = nullptr;
OpSummary* OpSummary::p_instance = nullptr;
bool OpSummary::extractBody = false;
OpSummaryDestroyer OpSummary::destroyer;

@ -24,7 +23,7 @@ OpSummaryDestroyer::~OpSummaryDestroyer() {
delete p_instance;
}

void OpSummaryDestroyer::initialize(OpSummary *p) {
void OpSummaryDestroyer::initialize(OpSummary* p) {
p_instance = p;
}

@ -32,7 +31,7 @@ OpSummary::OpSummary() {
reportFilename = ov::test::utils::OP_REPORT_FILENAME;
}

OpSummary &OpSummary::getInstance() {
OpSummary& OpSummary::getInstance() {
if (!p_instance) {
p_instance = new OpSummary();
destroyer.initialize(p_instance);

@ -40,12 +39,14 @@ OpSummary &OpSummary::getInstance() {
return *p_instance;
}

void OpSummary::updateOPsStats(const ov::NodeTypeInfo &op, const PassRate::Statuses &status, double rel_influence_coef) {
void OpSummary::updateOPsStats(const ov::NodeTypeInfo& op,
const PassRate::Statuses& status,
double rel_influence_coef) {
auto it = opsStats.find(op);
if (opsStats.find(op) == opsStats.end()) {
opsStats.insert({op, PassRate()});
}
auto &passrate = opsStats[op];
auto& passrate = opsStats[op];
if (isCrashReported) {
isCrashReported = false;
if (passrate.crashed > 0)

@ -58,33 +59,33 @@ void OpSummary::updateOPsStats(const ov::NodeTypeInfo &op, const PassRate::Statu
return;
}
switch (status) {
case PassRate::PASSED:
if (!passrate.isImplemented) {
passrate.isImplemented = true;
}
passrate.passed++;
passrate.rel_passed += rel_influence_coef;
break;
case PassRate::FAILED:
passrate.failed++;
break;
case PassRate::SKIPPED:
passrate.skipped++;
break;
case PassRate::CRASHED: {
passrate.crashed++;
isCrashReported = true;
break;
}
case PassRate::HANGED: {
passrate.hanged++;
isHangReported = true;
break;
case PassRate::PASSED:
if (!passrate.isImplemented) {
passrate.isImplemented = true;
}
passrate.passed++;
passrate.rel_passed += rel_influence_coef;
break;
case PassRate::FAILED:
passrate.failed++;
break;
case PassRate::SKIPPED:
passrate.skipped++;
break;
case PassRate::CRASHED: {
passrate.crashed++;
isCrashReported = true;
break;
}
case PassRate::HANGED: {
passrate.hanged++;
isHangReported = true;
break;
}
}
}

void OpSummary::updateOPsImplStatus(const ov::NodeTypeInfo &op, const bool implStatus) {
void OpSummary::updateOPsImplStatus(const ov::NodeTypeInfo& op, const bool implStatus) {
auto it = opsStats.find(op);
if (it != opsStats.end()) {
if (!it->second.isImplemented && implStatus) {

@ -119,7 +120,7 @@ std::map<std::string, PassRate> OpSummary::getStatisticFromReport() {
pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.child(deviceName.c_str());
std::map<std::string, PassRate> oldOpsStat;
for (auto &child : currentDeviceNode.children()) {
for (auto& child : currentDeviceNode.children()) {
std::string entry = child.name();
auto p = std::stoi(child.attribute("passed").value());
auto f = std::stoi(child.attribute("failed").value());

@ -134,12 +135,12 @@ std::map<std::string, PassRate> OpSummary::getStatisticFromReport() {
return oldOpsStat;
}

void OpSummary::updateOPsStats(const std::shared_ptr<ov::Model> &model, const PassRate::Statuses &status, double k) {
void OpSummary::updateOPsStats(const std::shared_ptr<ov::Model>& model, const PassRate::Statuses& status, double k) {
if (model->get_parameters().empty()) {
return;
}
bool isFunctionalGraph = false, isReportConvert = true;
for (const auto &op : model->get_ordered_ops()) {
for (const auto& op : model->get_ordered_ops()) {
if (!std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) &&
!std::dynamic_pointer_cast<ov::op::v0::Constant>(op) &&
!std::dynamic_pointer_cast<ov::op::v0::Result>(op)) {

@ -154,10 +155,11 @@ void OpSummary::updateOPsStats(const std::shared_ptr<ov::Model> &model, const Pa
}
}

for (const auto &op : model->get_ordered_ops()) {
for (const auto& op : model->get_ordered_ops()) {
if ((std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Constant>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Result>(op)) && isFunctionalGraph) {
std::dynamic_pointer_cast<ov::op::v0::Result>(op)) &&
isFunctionalGraph) {
continue;
}
// todo: remove w/a to provide correct convert reporting after merge CVS-110714

@ -193,12 +195,12 @@ void OpSummary::updateOPsStats(const std::shared_ptr<ov::Model> &model, const Pa
}
}

void OpSummary::updateOPsImplStatus(const std::shared_ptr<ov::Model> &model, const bool implStatus) {
void OpSummary::updateOPsImplStatus(const std::shared_ptr<ov::Model>& model, const bool implStatus) {
if (model->get_parameters().empty()) {
return;
}
bool isFunctionalGraph = false;
for (const auto &op : model->get_ordered_ops()) {
for (const auto& op : model->get_ordered_ops()) {
if (!std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) &&
!std::dynamic_pointer_cast<ov::op::v0::Constant>(op) &&
!std::dynamic_pointer_cast<ov::op::v0::Result>(op)) {

@ -207,10 +209,11 @@ void OpSummary::updateOPsImplStatus(const std::shared_ptr<ov::Model> &model, con
}
}

for (const auto &op : model->get_ordered_ops()) {
for (const auto& op : model->get_ordered_ops()) {
if ((std::dynamic_pointer_cast<ov::op::v0::Parameter>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Constant>(op) ||
std::dynamic_pointer_cast<ov::op::v0::Result>(op)) && isFunctionalGraph) {
std::dynamic_pointer_cast<ov::op::v0::Result>(op)) &&
isFunctionalGraph) {
continue;
} else if (std::dynamic_pointer_cast<ov::op::v0::TensorIterator>(op)) {
updateOPsImplStatus(op->get_type_info(), implStatus);

@ -229,15 +232,21 @@ void OpSummary::updateOPsImplStatus(const std::shared_ptr<ov::Model> &model, con
}

#ifdef IE_TEST_DEBUG
void Summary::saveDebugReport(const char* className, const char* opName, unsigned long passed, unsigned long failed,
unsigned long skipped, unsigned long crashed, unsigned long hanged) {
void Summary::saveDebugReport(const char* className,
const char* opName,
unsigned long passed,
unsigned long failed,
unsigned long skipped,
unsigned long crashed,
unsigned long hanged) {
std::string outputFilePath = "./part_report.txt";
std::ofstream file;
file.open(outputFilePath, std::ios_base::app);
file << className << ' ' << opName << ' ' << passed << ' ' << failed << ' ' << skipped << ' ' << crashed << ' ' << hanged << '\n';
file << className << ' ' << opName << ' ' << passed << ' ' << failed << ' ' << skipped << ' ' << crashed << ' '
<< hanged << '\n';
file.close();
}
#endif //IE_TEST_DEBUG
#endif  // IE_TEST_DEBUG

void OpSummary::saveReport() {
if (isReported) {

@ -262,10 +271,10 @@ void OpSummary::saveReport() {
std::string outputFilePath = outputFolder + std::string(ov::test::utils::FileSeparator) + filename;

std::map<ov::NodeTypeInfo, std::string> opsInfo;
for (const auto &opset_pair : get_available_opsets()) {
for (const auto& opset_pair : get_available_opsets()) {
std::string opset_version = opset_pair.first;
const ov::OpSet& opset = opset_pair.second();
const auto &type_info_set = opset.get_type_info_set();
const auto& type_info_set = opset.get_type_info_set();
for (const auto& type_info : type_info_set) {
auto it = opsInfo.find(type_info);
std::string op_version = getOpVersion(opset_version);

@ -278,7 +287,7 @@ void OpSummary::saveReport() {
}
}

auto &summary = OpSummary::getInstance();
auto& summary = OpSummary::getInstance();
auto stats = summary.getOPsStats();

pugi::xml_document doc;

@ -286,12 +295,12 @@ void OpSummary::saveReport() {
const bool fileExists = ov::test::utils::fileExists(outputFilePath);

time_t rawtime;
struct tm *timeinfo;
struct tm* timeinfo;
char timeNow[80];

time(&rawtime);
// cpplint require to use localtime_r instead which is not available in C++11
timeinfo = localtime(&rawtime); // NOLINT
timeinfo = localtime(&rawtime);  // NOLINT

strftime(timeNow, sizeof(timeNow), "%d-%m-%Y %H:%M:%S", timeinfo);

@ -299,7 +308,7 @@ void OpSummary::saveReport() {
if (fileExists) {
doc.load_file(outputFilePath.c_str());
root = doc.child("report");
//Ugly but shorter than to write predicate for find_atrribute() to update existing one
// Ugly but shorter than to write predicate for find_atrribute() to update existing one
root.remove_attribute("timestamp");
root.append_attribute("timestamp").set_value(timeNow);

@ -312,7 +321,7 @@ void OpSummary::saveReport() {
}

pugi::xml_node opsNode = root.append_child("ops_list");
for (const auto &op : opsInfo) {
for (const auto& op : opsInfo) {
std::string name = std::string(op.first.name) + "-" + getOpVersion(op.first.version_id);
opsNode.append_child(name.c_str()).append_attribute("opsets").set_value(op.second.c_str());
}

@ -320,7 +329,7 @@ void OpSummary::saveReport() {
pugi::xml_node resultsNode = root.child("results");
pugi::xml_node currentDeviceNode = resultsNode.append_child(summary.deviceName.c_str());
std::unordered_set<std::string> opList;
for (const auto &it : stats) {
for (const auto& it : stats) {
std::string name = std::string(it.first.name) + "-" + getOpVersion(it.first.version_id);
opList.insert(name);
pugi::xml_node entry = currentDeviceNode.append_child(name.c_str());

@ -338,7 +347,7 @@ void OpSummary::saveReport() {

if (extendReport && fileExists) {
auto opStataFromReport = summary.getStatisticFromReport();
for (auto &item : opStataFromReport) {
for (auto& item : opStataFromReport) {
pugi::xml_node entry;
if (opList.find(item.first) == opList.end()) {
entry = currentDeviceNode.append_child(item.first.c_str());

@ -364,9 +373,8 @@ void OpSummary::saveReport() {
auto rel_all = std::stoi(entry.attribute("relative_all").value()) + item.second.rel_all;
PassRate obj(p, f, s, c, h, rel_passed, rel_all);

(implStatus || obj.isImplemented)
? entry.attribute("implemented").set_value(true)
: entry.attribute("implemented").set_value(false);
(implStatus || obj.isImplemented) ? entry.attribute("implemented").set_value(true)
: entry.attribute("implemented").set_value(false);
entry.attribute("passed").set_value(static_cast<unsigned long long>(obj.passed));
entry.attribute("failed").set_value(static_cast<unsigned long long>(obj.failed));
entry.attribute("skipped").set_value(static_cast<unsigned long long>(obj.skipped));
@ -8,7 +8,13 @@ namespace ov {
namespace test {
namespace utils {

PassRate::PassRate(unsigned long p, unsigned long f, unsigned long s, unsigned long c, unsigned long h, double rel_p, double rel_a) {
PassRate::PassRate(unsigned long p,
unsigned long f,
unsigned long s,
unsigned long c,
unsigned long h,
double rel_p,
double rel_a) {
passed = p;
failed = f;
skipped = s;

@ -60,17 +66,30 @@ void Summary::setReportFilename(const std::string& val) {
reportFilename = val.c_str();
}

void Summary::setExtendReport(bool val) { extendReport = val; }
bool Summary::getExtendReport() { return extendReport; }
void Summary::setExtendReport(bool val) {
extendReport = val;
}
bool Summary::getExtendReport() {
return extendReport;
}

void Summary::setSaveReportWithUniqueName(bool val) { saveReportWithUniqueName = val; }
bool Summary::getSaveReportWithUniqueName() { return saveReportWithUniqueName; }
void Summary::setSaveReportWithUniqueName(bool val) {
saveReportWithUniqueName = val;
}
bool Summary::getSaveReportWithUniqueName() {
return saveReportWithUniqueName;
}

void Summary::setSaveReportTimeout(size_t val) { saveReportTimeout = val; }
size_t Summary::getSaveReportTimeout() { return saveReportTimeout; }

void Summary::setOutputFolder(const std::string &val) { outputFolder = val.c_str(); }
void Summary::setSaveReportTimeout(size_t val) {
saveReportTimeout = val;
}
size_t Summary::getSaveReportTimeout() {
return saveReportTimeout;
}

void Summary::setOutputFolder(const std::string& val) {
outputFolder = val.c_str();
}

} // namespace utils
} // namespace test
@ -2,39 +2,26 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <fstream>
#include <memory>
#include <string>
#include <algorithm>
#include <vector>

#include "functional_test_utils/test_model/test_model.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include <ngraph_functions/subgraph_builders.hpp>
#include <ngraph/pass/manager.hpp>

#include "ngraph_functions/subgraph_builders.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/pass/serialize.hpp"
#include "ie_ngraph_utils.hpp"

namespace FuncTestUtils {
namespace TestModel {
namespace ov {
namespace test {
namespace utils {

/**
* @brief generates IR files (XML and BIN files) with the test model.
* Passed reference vector is filled with CNN layers to validate after the network reading.
* @param modelPath used to serialize the generated network
* @param weightsPath used to serialize the generated weights
* @param netPrc precision of the generated network
* @param inputDims dims on the input layer of the generated network
*/
void generateTestModel(const std::string &modelPath,
const std::string &weightsPath,
const InferenceEngine::Precision &netPrc,
const InferenceEngine::SizeVector &inputDims) {
ngraph::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(modelPath, weightsPath);
manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu(
inputDims, InferenceEngine::details::convertPrecision(netPrc)));
void generate_test_model(const std::string& model_path,
const std::string& weights_path,
const ov::element::Type& input_type,
const ov::PartialShape& input_shape) {
ov::pass::Manager manager;
manager.register_pass<ov::pass::Serialize>(model_path, weights_path);
manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu(input_shape.to_shape(), input_type));
}

} // namespace TestModel
} // namespace FuncTestUtils
} // namespace utils
} // namespace test
} // namespace ov