Added pre-proc tests (#2708)

* Added pre-proc test

* Fixed mean image tests

* Disabled scale tests for CPU

* Disabled test for Myriad

* Disable tests for GPU

* Added reverse input channels test

* Disabled test for GPU

* Disable Myriad tests

* Added links to tickets
Ilya Churaev 2020-11-24 10:09:54 +03:00 committed by GitHub
parent 2c13e084ef
commit 6666e5fed3
4 changed files with 264 additions and 2 deletions


@@ -35,6 +35,9 @@ std::vector<std::string> disabledTestPatterns() {
#if (defined(_WIN32) || defined(_WIN64))
R"(.*(CoreThreadingTestsWithIterations).*(smoke_LoadNetworkAccuracy).*)",
#endif
// TODO: Issue: 43793
R"(.*(PreprocessTest).*(SetScalePreProcess).*)",
R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcess).*)",
// TODO: Issue: 40957
R"(.*(ConstantResultSubgraphTest).*)",
// TODO: Issue: 34348
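
Note: these patterns (here and in the two skip configs below) are matched as regular expressions against the full GTest name. A minimal sketch of that matching, assuming std::regex semantics as in the shared test utilities; the helper name is illustrative:

#include <regex>
#include <string>
#include <vector>

// Illustrative helper: a test is skipped when its full name matches
// any pattern returned by disabledTestPatterns().
bool isDisabled(const std::string &fullTestName,
                const std::vector<std::string> &patterns) {
    for (const auto &pattern : patterns) {
        if (std::regex_match(fullTestName, std::regex(pattern)))
            return true;  // e.g. "smoke/PreprocessTest.SetScalePreProcess/0"
    }
    return false;
}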


@@ -24,7 +24,9 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*EltwiseLayerTest.*IS=\(.*\..*\..*\..*\..*\).*eltwiseOpType=Pow.*secondaryInputType=CONSTANT.*)",
// TODO: Issue: 40958
R"(.*(ConstantResultSubgraphTest).*)",
// TODO: Issue: 43794
R"(.*(PreprocessTest).*(SetScalePreProcess).*)",
R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcess).*)",
// TODO: Issue: 41467 -- "unsupported element type f16 op Convert"
R"(.*(ConvertLayerTest).*targetPRC=FP16.*)",
// TODO: Issue: 41466 -- "Unsupported op 'ConvertLike'"


@@ -27,6 +27,9 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*TopKLayerTest.*mode=min.*sort=index.*)",
// TODO: Issue: 40961
R"(.*(ConstantResultSubgraphTest).*)",
// TODO: Issue: 43795
R"(.*(PreprocessTest).*(SetMeanValuePreProcess).*)",
R"(.*(PreprocessTest).*(ReverseInputChannelsPreProcess).*)",
// TODO: Issue: 42828
R"(.*DSR_NonMaxSuppression.*NBoxes=(5|20|200).*)",
// TODO: Issue: 42721


@@ -5,6 +5,7 @@
#include <vector>
#include <ie_core.hpp>
#include <blob_factory.hpp>
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
@@ -60,4 +61,257 @@ TEST_P(PreprocessTest, SetPreProcessToInferRequest) {
info->getResizeAlgorithm());
}
}
} // namespace BehaviorTestsDefinitions
TEST_P(PreprocessTest, SetMeanImagePreProcess) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> ngraph;
{
ngraph::PartialShape shape({1, 3, 10, 10});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::op::Parameter>(type, shape);
param->set_friendly_name("param");
auto relu = std::make_shared<ngraph::op::Relu>(param);
relu->set_friendly_name("relu");
auto result = std::make_shared<ngraph::op::Result>(relu);
result->set_friendly_name("result");
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ngraph = std::make_shared<ngraph::Function>(results, params);
}
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(ngraph);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.init(3);
for (size_t i = 0; i < 3; i++) {
preProcess[i]->meanData = make_blob_with_precision(InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32,
{10, 10},
InferenceEngine::Layout::HW));
preProcess[i]->meanData->allocate();
auto lockedMem = preProcess[i]->meanData->buffer();
auto* data = lockedMem.as<float *>();
for (size_t j = 0; j < 100; j++) {
data[j] = -static_cast<float>(i * 100 + j);
}
}
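// The mean image stores -(i * 100 + j), i.e. the negated input value,
// so mean subtraction computes x - (-x) = 2x; the output check below
// relies on this.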
preProcess.setVariant(InferenceEngine::MEAN_IMAGE);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto inBlob = req.GetBlob("param");
// Fill input
{
auto lockedMem = inBlob->buffer();
auto *inData = lockedMem.as<float*>();
for (size_t i = 0; i < inBlob->size(); i++)
inData[i] = i;
}
req.Infer();
// Check output
auto outBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
{
auto inMem = inBlob->cbuffer();
const auto* inData = inMem.as<const float*>();
auto outMem = outBlob->cbuffer();
const auto* outData = outMem.as<const float*>();
ASSERT_EQ(inBlob->size(), outBlob->size());
for (size_t i = 0; i < inBlob->size(); i++)
ASSERT_EQ(inData[i] + inData[i], outData[i]);
}
}
TEST_P(PreprocessTest, SetMeanValuePreProcess) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> ngraph;
{
ngraph::PartialShape shape({1, 3, 10, 10});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::op::Parameter>(type, shape);
param->set_friendly_name("param");
auto relu = std::make_shared<ngraph::op::Relu>(param);
relu->set_friendly_name("relu");
auto result = std::make_shared<ngraph::op::Result>(relu);
result->set_friendly_name("result");
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ngraph = std::make_shared<ngraph::Function>(results, params);
}
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(ngraph);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.init(3);
preProcess[0]->meanValue = -5;
preProcess[1]->meanValue = -5;
preProcess[2]->meanValue = -5;
preProcess[0]->stdScale = 1;
preProcess[1]->stdScale = 1;
preProcess[2]->stdScale = 1;
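// With meanValue = -5 and stdScale = 1, preprocessing computes
// x - (-5) = x + 5, which the output check below verifies.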
preProcess.setVariant(InferenceEngine::MEAN_VALUE);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto inBlob = req.GetBlob("param");
// Fill input
{
auto lockedMem = inBlob->buffer();
auto *inData = lockedMem.as<float*>();
for (size_t i = 0; i < inBlob->size(); i++)
inData[i] = i;
}
req.Infer();
// Check output
auto outBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
{
auto inMem = inBlob->cbuffer();
const auto* inData = inMem.as<const float*>();
auto outMem = outBlob->cbuffer();
const auto* outData = outMem.as<const float*>();
ASSERT_EQ(inBlob->size(), outBlob->size());
for (size_t i = 0; i < inBlob->size(); i++)
ASSERT_EQ(inData[i] + 5, outData[i]);
}
}
TEST_P(PreprocessTest, ReverseInputChannelsPreProcess) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> ngraph;
{
ngraph::PartialShape shape({1, 3, 10, 10});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::op::Parameter>(type, shape);
param->set_friendly_name("param");
auto relu = std::make_shared<ngraph::op::Relu>(param);
relu->set_friendly_name("relu");
auto result = std::make_shared<ngraph::op::Result>(relu);
result->set_friendly_name("result");
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ngraph = std::make_shared<ngraph::Function>(results, params);
}
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(ngraph);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.setColorFormat(InferenceEngine::ColorFormat::RGB);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto inBlob = req.GetBlob("param");
// Fill input
{
auto lockedMem = inBlob->buffer();
auto *inData = lockedMem.as<float*>();
for (size_t i = 0; i < inBlob->size(); i++)
inData[i] = i;
}
req.Infer();
// Check output
auto outBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
{
auto inMem = inBlob->cbuffer();
const auto* inData = inMem.as<const float*>();
auto outMem = outBlob->cbuffer();
const auto* outData = outMem.as<const float*>();
ASSERT_EQ(inBlob->size(), outBlob->size());
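// Expected permutation: output channel 0 <- input channel 2,
// channel 1 unchanged, output channel 2 <- input channel 0.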
for (size_t i = 0; i < 3; i++)
for (size_t j = 0; j < 100; j++) {
// BGR to RGB
if (i == 0) {
    ASSERT_EQ(inData[j], outData[200 + j]);
} else if (i == 1) {
    ASSERT_EQ(inData[100 + j], outData[100 + j]);
} else {
    ASSERT_EQ(inData[200 + j], outData[j]);
}
}
}
}
TEST_P(PreprocessTest, SetScalePreProcess) {
// Skip test according to plugin specific disabledTestPatterns() (if any)
SKIP_IF_CURRENT_TEST_IS_DISABLED()
std::shared_ptr<ngraph::Function> ngraph;
{
ngraph::PartialShape shape({1, 3, 10, 10});
ngraph::element::Type type(ngraph::element::Type_t::f32);
auto param = std::make_shared<ngraph::op::Parameter>(type, shape);
param->set_friendly_name("param");
auto relu = std::make_shared<ngraph::op::Relu>(param);
relu->set_friendly_name("relu");
auto result = std::make_shared<ngraph::op::Result>(relu);
result->set_friendly_name("result");
ngraph::ParameterVector params = {param};
ngraph::ResultVector results = {result};
ngraph = std::make_shared<ngraph::Function>(results, params);
}
// Create CNNNetwork from ngraph::Function
InferenceEngine::CNNNetwork cnnNet(ngraph);
auto &preProcess = cnnNet.getInputsInfo().begin()->second->getPreProcess();
preProcess.init(3);
preProcess[0]->stdScale = 2;
preProcess[1]->stdScale = 2;
preProcess[2]->stdScale = 2;
preProcess[0]->meanValue = 0;
preProcess[1]->meanValue = 0;
preProcess[2]->meanValue = 0;
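// With zero mean and stdScale = 2 the scale is applied multiplicatively,
// so each output element should equal twice its input.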
preProcess.setVariant(InferenceEngine::MEAN_VALUE);
// Load CNNNetwork to target plugins
auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
// Create InferRequest
auto req = execNet.CreateInferRequest();
auto inBlob = req.GetBlob("param");
// Fill input
{
auto lockedMem = inBlob->buffer();
auto *inData = lockedMem.as<float*>();
for (size_t i = 0; i < inBlob->size(); i++)
inData[i] = i;
}
req.Infer();
// Check output
auto outBlob = req.GetBlob(cnnNet.getOutputsInfo().begin()->first);
{
auto inMem = inBlob->cbuffer();
const auto* inData = inMem.as<const float*>();
auto outMem = outBlob->cbuffer();
const auto* outData = outMem.as<const float*>();
ASSERT_EQ(inBlob->size(), outBlob->size());
for (size_t i = 0; i < inBlob->size(); i++)
ASSERT_EQ(inData[i] * 2, outData[i]);
}
}
} // namespace BehaviorTestsDefinitions
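
These cases only run for a plugin once it instantiates the parameterized suite. A minimal sketch of such an instantiation, assuming PreprocessTest uses the usual (precision, device, config) tuple of the behavior suites; the header path, device, and configs below are placeholders:

#include <map>
#include <string>
#include <vector>
#include "behavior/preprocessing.hpp"

using namespace BehaviorTestsDefinitions;

namespace {
// Hypothetical instantiation for the CPU plugin; real suites pick their
// own precisions and plugin configuration maps.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
};

const std::vector<std::map<std::string, std::string>> configs = {{}};

INSTANTIATE_TEST_CASE_P(smoke_Behavior, PreprocessTest,
                        ::testing::Combine(
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                ::testing::ValuesIn(configs)),
                        PreprocessTest::getTestCaseName);
}  // namespace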