Revert "[op conformance] Made fixes to allign with accuracy validation (#21347)" (#21778)

This reverts commit 8f13219728.
Authored by Ilya Lavrenov on 2023-12-20 11:41:27 +04:00, committed by GitHub
parent f6cd2150c2
commit 119dc95de6
6 changed files with 10 additions and 117 deletions


@@ -212,13 +212,6 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*RandomUniformLayerTestCPU.*OutPrc=i64.*)",
// Issue: 123321
R"(.*smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.Inference.*hidden_size=10.*relu.*)",
// Issue:
R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference.*\(5.1\)\(5.1\)\(5.1\)\(4.1\)\(4.1\)\(4\).*relu.relu.relu.*_clip=0_.*modelType=f16.*)",
R"(.*smoke_LSTMCellCommon/LSTMCellTest.Inference.*\(5.1\)\(5.10\)\(5.10\)\(40.1\)\(40.10\)\(40\).*relu.relu.relu.*_clip=0_.*modelType=f16.*)",
R"(.*smoke_LSTMSequenceCommonZeroClip/LSTMSequenceTest.Inference/mode=PURE_SEQ_RAND_SEQ_LEN.*relu.relu.relu.*modelType=f16.*)",
R"(.*smoke_MaxPool_ExplicitPad_CeilRounding/PoolingLayerTest.Inference/.*K\(3.3\).*(S\(1.2\)|S\(2.2\)).*PE\(0.2\).*modelType=f16.*)",
R"(.*smoke_MaxPoolv8_ExplicitPad_CeilRounding/MaxPoolingV8LayerTest.Inference/.*K\(3.3\).*(S\(1.2\)|S\(2.2\)).*D\(1.1\).*PE\(0.2\).*modelType=f16.*)",
R"(.*smoke_MemoryTest/MemoryLayerTest.Inference.*trgDev=HETERO:CPU.*)",
// Issue: 123427
R"(.*RDFTLayerTest.*SignalSize=().*)",
// Issue: 123815 (Tests are sensitive to available thread count on testing machines)
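
For context, each entry in disabledTestPatterns() is a regular expression matched against the full test name; a test is skipped when any pattern matches. A minimal standalone sketch of that matching follows; the helper and the sample test name are illustrative, not the framework's actual code:

#include <regex>
#include <string>
#include <vector>

// Illustrative only: returns true when a test name matches any disabled pattern,
// which is how an entry such as the HETERO:CPU MemoryTest pattern above takes effect.
bool is_test_disabled(const std::string& test_name, const std::vector<std::string>& patterns) {
    for (const auto& pattern : patterns) {
        if (std::regex_search(test_name, std::regex(pattern)))
            return true;
    }
    return false;
}

// Usage (hypothetical test name):
// is_test_disabled("smoke_MemoryTest/MemoryLayerTest.Inference/iter=3_trgDev=HETERO:CPU",
//                  {R"(.*smoke_MemoryTest/MemoryLayerTest.Inference.*trgDev=HETERO:CPU.*)"}) == true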


@@ -38,11 +38,9 @@ protected:
virtual void compile_model();
virtual void infer();
virtual void validate();
virtual void configure_model();
virtual void configure_model();;
virtual void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes);
bool is_allowed_convertion(ov::element::Type from, ov::element::Type to);
void get_convert_precision_map();
void update_ref_model();
void match_parameters();
void init_input_shapes(const std::vector<InputShape>& shapes);


@@ -295,57 +295,6 @@ void SubgraphBaseTest::configure_model() {
function = p.build();
}
bool SubgraphBaseTest::is_allowed_convertion(ov::element::Type from, ov::element::Type to) {
// based on logic from /src/common/transformations/src/transformations/convert_precision.cpp
// and some restrictions on converting real to integral and vice versa from ops
if (from == to) {
return false;
}
if (from.is_real()) {
if ((from == ov::element::Type_t::f32 && to == ov::element::Type_t::f16) || to == ov::element::Type_t::f32) {
return true;
}
} else if (from.is_integral() && from.is_signed()) {
if ((from == ov::element::Type_t::i8 && to == ov::element::Type_t::i64) ||
(from != ov::element::Type_t::i8 && to == ov::element::Type_t::i32)) {
return true;
}
} else {
// integral and unsigned
if (to == ov::element::Type_t::i32) {
return true;
}
}
return false;
}
void SubgraphBaseTest::get_convert_precision_map() {
try {
ov::element::Type inf_prec = core->get_property(targetDevice, ov::hint::inference_precision);
{
auto& params = function->get_parameters();
for (size_t i = 0; i < params.size(); i++) {
ov::element::Type param_type = params[i]->get_element_type();
if (is_allowed_convertion(param_type, inf_prec)) {
convert_precisions.insert({ param_type , inf_prec });
}
}
}
{
auto results = function->get_results();
for (size_t i = 0; i < results.size(); i++) {
ov::element::Type result_type = results[i]->get_element_type();
if (is_allowed_convertion(result_type, inf_prec)) {
convert_precisions.insert({ result_type , inf_prec });
}
}
}
} catch (const std::exception& ex) {
std::cout << "[ REFERENCE ] Ref model will not be converted to plugin inference precision: " << ex.what() << std::endl;
}
}
void SubgraphBaseTest::compile_model() {
if (is_report_stages) {
std::cout << "[ PLUGIN ] `SubgraphBaseTest::compile_model()` is started" << std::endl;
@@ -354,7 +303,6 @@ void SubgraphBaseTest::compile_model() {
configure_model();
core_configuration(this);
get_convert_precision_map();
compiledModel = core->compile_model(function, targetDevice, configuration);
if (is_report_stages) {
auto end_time = std::chrono::system_clock::now();
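
The is_allowed_convertion()/get_convert_precision_map() pair shown above encodes a small decision table: real element types may only be converted to f32 (plus f32 -> f16), signed integers convert to i64 (from i8) or i32 (otherwise), and unsigned integers convert to i32. The following is a standalone restatement of that table for illustration; the free function, the sample checks, and the use of the public openvino/core/type/element_type.hpp header are assumptions made for the sketch, not part of the change itself:

#include <iostream>
#include <openvino/core/type/element_type.hpp>

// Illustrative restatement of the conversion rules above; not the framework's code.
bool conversion_allowed(ov::element::Type from, ov::element::Type to) {
    if (from == to)
        return false;
    if (from.is_real())  // f64/f32/f16/bf16: only f32 -> f16 or any real type -> f32
        return (from == ov::element::f32 && to == ov::element::f16) || to == ov::element::f32;
    if (from.is_signed())  // signed integral: i8 -> i64, other signed ints -> i32
        return (from == ov::element::i8 && to == ov::element::i64) ||
               (from != ov::element::i8 && to == ov::element::i32);
    return to == ov::element::i32;  // unsigned integral (and boolean) -> i32
}

int main() {
    std::cout << conversion_allowed(ov::element::f16, ov::element::f32)   // 1: real types may widen to f32
              << conversion_allowed(ov::element::f32, ov::element::bf16)  // 0: bf16 is not an allowed target here
              << conversion_allowed(ov::element::u8, ov::element::i32)    // 1: unsigned integral maps to i32
              << std::endl;
}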


@@ -217,24 +217,11 @@ ov::runtime::Tensor create_and_fill_tensor_consistently(const ov::element::Type
constexpr double eps = std::numeric_limits<double>::epsilon();
inline double less(double a, double b) {
return std::fabs(a - b) > eps && a < b;
return (b - a) > (std::fmax(std::fabs(a), std::fabs(b)) * eps);
}
inline double less_or_equal(double a, double b) {
bool res = true;
if (std::isnan(a) || std::isnan(b)) {
res = false;
} else if (std::isinf(b) && b > 0) {
// b is greater than any number or equal to +Inf
res = true;
} else if (std::isinf(a) && a > 0) {
res = false;
} else {
res = (std::fabs(b - a) <= (std::fmax(std::fabs(a), std::fabs(b)) * eps) || a < b);
}
double eq_midle_res = std::fabs(b - a);
bool eq_res = (std::fabs(b - a) <= (std::fmax(std::fabs(a), std::fabs(b)) * eps));
return res;
return (b - a) >= (std::fmax(std::fabs(a), std::fabs(b)) * eps);
}
struct Error {
@@ -294,33 +281,13 @@ void compare(const ov::Tensor& expected,
if (abs_threshold == std::numeric_limits<double>::max() && rel_threshold == std::numeric_limits<double>::max()) {
if (sizeof(ExpectedT) == 1 || sizeof(ActualT) == 1) {
abs_threshold = 1.;
rel_threshold = 1.;
if (expected.get_element_type() == ov::element::Type_t::boolean) {
abs_threshold = 0.;
rel_threshold = 0.;
}
} else {
std::vector<double> abs_values(shape_size_cnt);
for (size_t i = 0; i < shape_size_cnt; i++) {
abs_values[i] = std::fabs(static_cast<double>(expected_data[i]));
}
auto abs_median = calculate_median(abs_values);
auto elem_type = expected.get_element_type();
abs_threshold = abs_median * 0.05 < 1e-5 ? 1e-5 : 0.05 * abs_median;
if (elem_type == ov::element::Type_t::boolean) {
abs_threshold = 0.;
} else if (elem_type.is_integral_number()) {
abs_threshold = 1.0;
} else if (elem_type == ov::element::Type_t::f32 || elem_type == ov::element::Type_t::f64) {
abs_threshold = abs_median * 0.05 < 1e-5 ? 1e-5 : 0.05 * abs_median;
} else if (elem_type == ov::element::Type_t::bf16 || elem_type == ov::element::Type_t::f16) {
abs_threshold = abs_median * 0.05 < 1e-3 ? 1e-3 : 0.05 * abs_median;
}
rel_threshold = abs_threshold;
if (std::is_integral<ExpectedT>::value) {
abs_threshold = std::ceil(abs_threshold);
}
@@ -348,14 +315,14 @@ void compare(const ov::Tensor& expected,
throw std::runtime_error(out_stream.str());
}
double abs = std::fabs(expected_value - actual_value);
double rel = expected_value && !std::isinf(expected_value) ? (abs / std::fabs(expected_value)) : abs;
double rel = expected_value ? (abs / std::fabs(expected_value)) : abs;
abs_error.update(abs, i);
rel_error.update(rel, i);
}
abs_error.mean /= shape_size_cnt;
rel_error.mean /= shape_size_cnt;
if (!(less_or_equal(abs_error.max, abs_threshold) || less_or_equal(rel_error.mean, rel_threshold))) {
if (!(less_or_equal(abs_error.max, abs_threshold) && less_or_equal(rel_error.max, rel_threshold))) {
std::ostringstream out_stream;
out_stream << "abs_max < abs_threshold && rel_max < rel_threshold"
<< "\n\t abs_max: " << abs_error.max << "\n\t\t coordinate " << abs_error.max_coordinate
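
The two variants of less()/less_or_equal() in the hunk above differ in how the tolerance is computed: one compares the difference against a fixed absolute epsilon, the other scales the tolerance by max(|a|, |b|). The distinction only matters at large magnitudes, where the spacing between adjacent doubles is far larger than std::numeric_limits<double>::epsilon(). Below is a minimal sketch contrasting the two; the helper names and sample values are illustrative:

#include <cmath>
#include <iostream>
#include <limits>

constexpr double eps = std::numeric_limits<double>::epsilon();

// Fixed absolute epsilon: any representable difference between large values passes.
bool less_abs(double a, double b) {
    return std::fabs(a - b) > eps && a < b;
}

// Relative epsilon: the tolerance grows with the magnitude of the operands.
bool less_rel(double a, double b) {
    return (b - a) > (std::fmax(std::fabs(a), std::fabs(b)) * eps);
}

int main() {
    const double x = 1e16;
    const double y = std::nextafter(x, 2e16);  // next representable double, roughly x + 2.0
    std::cout << less_abs(x, y) << std::endl;  // 1: the ~2.0 gap exceeds eps (~2.2e-16)
    std::cout << less_rel(x, y) << std::endl;  // 0: the one-ulp gap falls inside the scaled tolerance
}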


@@ -1,7 +1,6 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import re
import csv
import os
from argparse import ArgumentParser
@@ -41,8 +40,7 @@ def path_to_model(model_path: os.path, prefix: str):
frameworks = {'tf', 'tf2', 'caffe', 'onnx', 'mxnet', 'paddle', 'kaldi'}
precisions = {'FP16', 'FP32', 'INT8', 'INT1'}
# remove share path + model.xml
model_path = model_path.replace('\n', '')
model, _ = os.path.split(re.sub(prefix, '', model_path))
model, _ = os.path.split(model_path.replace('\n', '').replace(prefix, ''))
model, _ = os.path.split(model)
model, _ = os.path.split(model)
model_name, model_framework, model_prc = (None, None, None)
@@ -68,21 +66,11 @@ def process_model_list(model_list_file_path: os.path):
with open(model_list_file_path, "r") as model_list_file:
in_models = model_list_file.readlines()
prefix = os.path.commonprefix(in_models)
prefix += '(.*?)/'
for line in in_models:
models.add(path_to_model(line, prefix))
model_list_file.close()
return models
def convert_accuracy_res_to_bool(accuracy_status: str):
conformance_like_status = accuracy_status
if accuracy_status == "improvement":
conformance_like_status = "passed"
elif accuracy_status == "downgrade":
conformance_like_status = "failed"
return conformance_like_status
def process_accuracy(accuracy_res_file: os.path, target_device:str):
if not os.path.isfile(accuracy_res_file):
raise Exception(f"Model filelist: {accuracy_res_file} is not file!")
@@ -114,12 +102,12 @@ def process_accuracy(accuracy_res_file: os.path, target_device:str):
model = Model(model_name=row[model_name_row_idx], model_framework=row[framework_row_idx], model_prc=row[precision_row_idx])
if target_device in row[device_row_idx]:
if model in results.keys():
old_status = convert_accuracy_res_to_bool(results[model])
new_status = convert_accuracy_res_to_bool(row[accuracy_status_row_idx])
old_status = results[model]
new_status = row[accuracy_status_row_idx]
if old_status != new_status and (new_status == "passed" or old_status == "not_found" or old_status == ""):
results[model] = new_status
else:
results.update({model: convert_accuracy_res_to_bool(row[accuracy_status_row_idx])})
results.update({model: row[accuracy_status_row_idx]})
csv_file.close()
return results
@@ -135,7 +123,6 @@ def process_conformance(failed_models_path:os.path):
model_paths.add(failed_model[0])
csv_file.close()
prefix = os.path.commonprefix(list(model_paths))
prefix += '(.*?)/'
for model in model_paths:
models.update({path_to_model(model, prefix): "failed"})
return models


@@ -829,7 +829,7 @@ class TestParallelRunner:
test_results[dir] += 1
else:
test_results[dir] = 1
if dir != "passed" and dir != "skipped":
if dir != "passed":
fix_priority.append((ref_k or 0, test_name))
ref_k = None
test_cnt_real_saved_now += 1