Use 'u8' element type in tests (#7704)

Use 'float32' output for 'compare with reference' tests to avoid undesired IE precision conversions
Added GPU preprocessing tests skeleton
This commit is contained in:
Mikhail Nosov
2021-09-29 09:46:37 +03:00
committed by GitHub
parent bd09f70876
commit 4fd59f72ea
5 changed files with 84 additions and 18 deletions

View File

@@ -101,23 +101,44 @@ static RefPreprocessParams scale_then_mean() {
return res;
}
// Test case: chained element-type conversions with an intermediate scale step.
// The i16 input is converted to f32, scaled by 3 (values divided by 3, as the
// expected outputs show), converted to u8 — which discards the fractional
// part — and finally converted back to f32 for comparison with the reference.
static RefPreprocessParams convert_only() {
RefPreprocessParams res("convert_only");
res.function = []() {
auto f = create_simple_function(element::f32, Shape{1, 1, 2, 2});
f = PrePostProcessor().input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::i16))
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.scale(3.f)
.convert_element_type(element::u8)
.convert_element_type(element::f32)))
.build(f);
return f;
};
// {2, 3, 4, 5} / 3 = {0.67, 1, 1.33, 1.67}; the f32->u8 conversion drops the
// fractional part, so the final f32 output is {0, 1, 1, 1}.
res.inputs.emplace_back(Shape{1, 1, 2, 2}, element::i16, std::vector<int16_t>{2, 3, 4, 5});
res.expected.emplace_back(Shape{1, 1, 2, 2}, element::f32, std::vector<float>{0., 1., 1., 1.});
return res;
}
static RefPreprocessParams convert_element_type_and_scale() {
RefPreprocessParams res("convert_element_type_and_scale");
res.function = []() {
auto f = create_simple_function(element::i8, Shape{1, 3, 2, 2});
auto f = create_simple_function(element::u8, Shape{1, 3, 2, 2});
f = PrePostProcessor()
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::i16))
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.scale(2.f)
.convert_element_type(element::i8)))
.convert_element_type(element::u8)))
.build(f);
return f;
};
res.inputs.emplace_back(Shape{1, 3, 2, 2}, element::i16, std::vector<int16_t>{2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 10000, 200});
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::i8, std::vector<int8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, (int8_t)5000, 100});
res.inputs.emplace_back(Shape{1, 3, 2, 2}, element::i16,
std::vector<int16_t>{2, 3, 6, 8, 10, 12, 14, 16, 18, 20, 10000, 200});
res.expected.emplace_back(Shape{1, 3, 2, 2}, element::u8,
std::vector<uint8_t>{1, 1, 3, 4, 5, 6, 7, 8, 9, 10, (uint8_t)5000, 100});
return res;
}
@@ -254,6 +275,7 @@ std::vector<RefPreprocessParams> allPreprocessTests() {
return std::vector<RefPreprocessParams> {
simple_mean_scale(),
scale_then_mean(),
convert_only(),
convert_element_type_and_scale(),
tensor_element_type_and_scale(),
custom_preprocessing(),

View File

@@ -71,6 +71,8 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*Behavior_Multi.*InferRequestSetBlobByType.*Batched.*)",
R"(.*(Multi|Auto).*Behavior.*InferRequestIOBBlobTest.*canProcessDeallocatedOutputBlobAfterGetAndSetBlob.*)",
// TODO: until issue xxx-59670 is resolved
R"(.*Gather8LayerTest.*)"
R"(.*Gather8LayerTest.*)",
// TODO: Issue 66516
R"(.*smoke_PrePostProcess_GPU.*convert_element_type_and_mean.*)"
};
}

View File

@@ -0,0 +1,29 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "shared_test_classes/subgraph/preprocess.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
using namespace ov::builder::preprocess;
// Subset of the shared pre/post-processing test cases enabled as GPU smoke
// tests. Each preprocess_func entry bundles a function builder, the test
// name, and a comparison threshold (0.01 for all entries here).
inline std::vector<preprocess_func> GPU_smoke_preprocess_functions() {
return std::vector<preprocess_func>{
preprocess_func(mean_only, "mean_only", 0.01f),
preprocess_func(scale_only, "scale_only", 0.01f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
};
}
// Instantiate the shared PrePostProcessTest suite for the GPU device over
// the test functions listed above.
INSTANTIATE_TEST_SUITE_P(smoke_PrePostProcess_GPU, PrePostProcessTest,
::testing::Combine(
::testing::ValuesIn(GPU_smoke_preprocess_functions()),
::testing::Values(CommonTestUtils::DEVICE_GPU)),
PrePostProcessTest::getTestCaseName);
} // namespace

View File

@@ -29,7 +29,6 @@ void PrePostProcessTest::SetUp() {
preprocess_func func;
std::tie(func, targetDevice) = GetParam();
function = (std::get<0>(func))();
outPrc = InferenceEngine::details::convertPrecision(function->get_output_element_type(0));
threshold = std::get<2>(func);
}

View File

@@ -22,7 +22,13 @@ inline std::shared_ptr<Function> create_preprocess_1input(element::Type type,
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
data1->set_friendly_name("input1");
data1->output(0).get_tensor().set_names({"input1"});
auto res = std::make_shared<op::v0::Result>(data1);
std::shared_ptr<op::v0::Result> res;
if (type == element::f32) {
res = std::make_shared<op::v0::Result>(data1);
} else {
auto convert = std::make_shared<op::v0::Convert>(data1, element::f32);
res = std::make_shared<op::v0::Result>(convert);
}
res->set_friendly_name("Result");
return std::make_shared<Function>(ResultVector{res}, ParameterVector{data1});
}
@@ -35,9 +41,17 @@ inline std::shared_ptr<Function> create_preprocess_2inputs(element::Type type,
auto data2 = std::make_shared<op::v0::Parameter>(type, shape);
data2->set_friendly_name("input2");
data2->output(0).get_tensor().set_names({"input2"});
auto res1 = std::make_shared<op::v0::Result>(data1);
std::shared_ptr<op::v0::Result> res1, res2;
if (type == element::f32) {
res1 = std::make_shared<op::v0::Result>(data1);
res2 = std::make_shared<op::v0::Result>(data2);
} else {
auto convert1 = std::make_shared<op::v0::Convert>(data1, element::f32);
res1 = std::make_shared<op::v0::Result>(convert1);
auto convert2 = std::make_shared<op::v0::Convert>(data2, element::f32);
res2 = std::make_shared<op::v0::Result>(convert2);
}
res1->set_friendly_name("Result1");
auto res2 = std::make_shared<op::v0::Result>(data2);
res2->set_friendly_name("Result2");
return std::make_shared<Function>(ResultVector{res1, res2}, ParameterVector{data1, data2});
}
@@ -90,24 +104,24 @@ inline std::shared_ptr<Function> scale_vector() {
inline std::shared_ptr<Function> convert_element_type_and_mean() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, Shape{1, 3, 24, 24});
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 24, 24});
function = PrePostProcessor()
.input(InputInfo()
.preprocess(PreProcessSteps()
.convert_element_type(element::f32)
.mean(0.2f)
.convert_element_type(element::i8)))
.convert_element_type(element::u8)))
.build(function);
return function;
}
inline std::shared_ptr<Function> tensor_element_type_and_mean() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, Shape{1, 3, 12, 12});
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 12, 12});
function = PrePostProcessor()
.input(InputInfo()
.tensor(InputTensorInfo().set_element_type(element::f32))
.preprocess(PreProcessSteps().mean(0.1f).convert_element_type(element::i8)))
.preprocess(PreProcessSteps().mean(0.1f).convert_element_type(element::u8)))
.build(function);
return function;
}
@@ -127,7 +141,7 @@ inline std::shared_ptr<Function> custom_preprocessing() {
inline std::shared_ptr<Function> lvalues_multiple_ops() {
using namespace ov::preprocess;
auto function = create_preprocess_1input(element::i8, Shape{1, 3, 3, 3});
auto function = create_preprocess_1input(element::u8, Shape{1, 3, 3, 3});
auto p = PrePostProcessor();
auto p1 = std::move(p);
p = std::move(p1);
@@ -155,7 +169,7 @@ inline std::shared_ptr<Function> lvalues_multiple_ops() {
abs->set_friendly_name(node->get_friendly_name() + "/abs");
return abs;
});
auto& same = preprocessSteps.convert_element_type(element::i8);
auto& same = preprocessSteps.convert_element_type(element::u8);
inputInfo.preprocess(std::move(same));
}
p.input(std::move(inputInfo));
@@ -200,10 +214,10 @@ inline std::vector<preprocess_func> generic_preprocess_functions() {
preprocess_func(scale_mean, "scale_mean", 0.01f),
preprocess_func(mean_vector, "mean_vector", 0.01f),
preprocess_func(scale_vector, "scale_vector", 0.01f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 1.f),
preprocess_func(tensor_element_type_and_mean, "tensor_element_type_and_mean", 1.f),
preprocess_func(convert_element_type_and_mean, "convert_element_type_and_mean", 0.01f),
preprocess_func(tensor_element_type_and_mean, "tensor_element_type_and_mean", 0.01f),
preprocess_func(custom_preprocessing, "custom_preprocessing", 0.01f),
preprocess_func(lvalues_multiple_ops, "lvalues_multiple_ops", 1.f),
preprocess_func(lvalues_multiple_ops, "lvalues_multiple_ops", 0.01f),
preprocess_func(two_inputs_basic, "two_inputs_basic", 0.01f),
preprocess_func(reuse_network_layout, "reuse_network_layout", 0.01f),
preprocess_func(tensor_layout, "tensor_layout", 0.01f),