[IE TESTS] Functional test review: Part 4 (#10772)

* [IE TESTS] Move specific import_export_tests to gna and myriad

* add
This commit is contained in:
Irina Efode 2022-03-04 14:42:16 +03:00 committed by GitHub
parent ed702910bd
commit 32edd596e3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 275 additions and 264 deletions

View File

@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "base/import_export_base/import_export_base.hpp" #include "import_export_base.hpp"
#include <fstream> #include <fstream>

View File

@ -13,7 +13,7 @@
#include <ie_layouts.h> #include <ie_layouts.h>
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "base/import_export_base/import_export_base.hpp" #include "base/import_export_base.hpp"
namespace LayerTestDefinitions { namespace LayerTestDefinitions {

View File

@ -10,7 +10,7 @@
#include <fstream> #include <fstream>
#include "ngraph_functions/builders.hpp" #include "ngraph_functions/builders.hpp"
#include "base/import_export_base/import_export_base.hpp" #include "base/import_export_base.hpp"
namespace LayerTestsDefinitions { namespace LayerTestsDefinitions {

View File

@ -0,0 +1,156 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <cstdio>
#include <fstream>

#include "base/import_export_base.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {

// Export/import round-trip fixture: builds a small
// reshape -> transpose -> convolution -> transpose -> reshape graph.
class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase {
protected:
    void SetUp() override {
        std::vector<size_t> inputShape;
        InferenceEngine::Precision netPrecision;
        std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
        const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

        const auto inputs = ngraph::builder::makeParams(ngPrc, { inputShape });

        // Reshape the flat input into NHWC-like 1x1x168x2 before the convolution.
        const std::vector<size_t> shapeBeforeConv = { 1, 1, 168, 2 };
        const auto reshapeInPattern = std::make_shared<ngraph::opset1::Constant>(
                ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, shapeBeforeConv);
        const auto reshapeIn = std::make_shared<ngraph::opset1::Reshape>(inputs[0], reshapeInPattern, false);

        // NHWC -> NCHW for the convolution.
        const auto transposeIn = std::make_shared<ngraph::opset1::Transpose>(
                reshapeIn, ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));

        const auto conv = ngraph::builder::makeConvolution(transposeIn, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 },
                                                           { 1, 1 }, ngraph::op::PadType::VALID, 12);

        // NCHW -> NHWC, then flatten back to a 2D result.
        const auto transposeOut = std::make_shared<ngraph::opset1::Transpose>(
                conv, ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));

        const std::vector<size_t> shapeAfterConv = { 1, 1932 };
        const auto reshapeOutPattern = std::make_shared<ngraph::opset1::Constant>(
                ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, shapeAfterConv);
        const auto reshapeOut = std::make_shared<ngraph::opset1::Reshape>(transposeOut, reshapeOutPattern, false);

        ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshapeOut) };
        function = std::make_shared<ngraph::Function>(results, inputs, "ExportImportNetwork");
    }
};

TEST_P(ImportReshapePermuteConv, CompareWithRefImpl) {
    Run();
}

} // namespace LayerTestsDefinitions
using namespace LayerTestsDefinitions;
namespace {
// GNA-specific round trip through a file on disk: the exported blob is
// prefixed with an arbitrary application header which must survive the trip.
class ImportExportGNAModelUnchanged : public ImportReshapePermuteConv {
private:
    void exportImportNetwork() override {
        {
            // Open in binary mode: the exported model is binary data and the
            // importer below reads it back in binary mode; a text-mode stream
            // would mangle the blob on Windows.
            std::ofstream out(fileName, std::ios_base::out | std::ios_base::binary);
            out.write(applicationHeader.c_str(), applicationHeader.size());
            executableNetwork.Export(out);
        }
        {
            std::string appHeader(applicationHeader.size(), ' ');
            std::fstream inputStream(fileName, std::ios_base::in | std::ios_base::binary);
            if (inputStream.fail()) {
                FAIL() << "Cannot open file to import model: " << fileName;
            }
            // Consume and verify the header before the plugin parses the blob.
            inputStream.read(&appHeader[0], applicationHeader.size());
            ASSERT_EQ(appHeader, applicationHeader);
            executableNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
        }
    }

protected:
    void TearDown() override {
        // Only clean up when the blob was actually produced; a skipped or
        // early-failed test must not report a spurious TearDown failure
        // because the file was never created.
        if (std::ifstream(fileName).good() && remove(fileName.c_str()) != 0) {
            FAIL() << "Error: could not delete file " << fileName;
        }
    }

private:
    std::string fileName = "exported_model.blob";  // scratch blob, removed in TearDown
};
// Same fixture; instantiated below with import configs that differ from the
// export config (see importConfigsChanged).
class ImportExportGNAModelChanged : public ImportExportGNAModelUnchanged {};
// NOTE(review): TestRun's bool presumably tells the base whether the import
// config intentionally differs from the export config — confirm against
// ImportNetworkTestBase.
TEST_P(ImportExportGNAModelUnchanged, ReshapePermuteConv) {
TestRun(false);
};
TEST_P(ImportExportGNAModelChanged, ReshapePermuteConv) {
TestRun(true);
};
// 336 = 1 * 1 * 168 * 2 — matches the first reshape inside the test model.
const std::vector<std::vector<size_t>> inputShapes = {
{1, 336}
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
// Export always runs in GNA software-exact mode with a fixed scale factor.
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
// Import config that deliberately differs from the export one (scale factor
// changed) — used by the "Changed" suite.
const std::vector<std::map<std::string, std::string>> importConfigsChanged = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "32767"}
}
};
// Import configs considered equivalent to the export config: same scale
// factor, a neutral scale factor, or no scale factor at all.
const std::vector<std::map<std::string, std::string>> importConfigsUnchanged = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "1"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
}
};
// Headers prepended to the exported blob: empty and a non-trivial marker.
const std::vector<std::string> appHeaders = {
"",
"APPLICATION_HEADER"
};
// Both instantiations share the smoke_ImportNetworkGNA prefix; that is valid
// because they parameterize two distinct test suites.
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelUnchanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigsUnchanged),
::testing::ValuesIn(appHeaders)),
ImportExportGNAModelUnchanged::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelChanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigsChanged),
::testing::ValuesIn(appHeaders)),
ImportExportGNAModelChanged::getTestCaseName);
} // namespace

View File

@ -1,117 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_reshape_permute_conv.hpp"
#include <fstream>
#include <stdio.h>
using namespace LayerTestsDefinitions;
namespace {
// Pre-move copy of the GNA export/import fixture (this file is deleted by the
// commit; the content now lives in the GNA plugin's own test tree).
class ImportExportGNAModelUnchanged : public ImportReshapePermuteConv {
private:
void exportImportNetwork() override {
{
// NOTE(review): export stream opens in default (text) mode while the
// import below reads in binary mode — mismatch on Windows.
std::ofstream out(fileName);
out.write(applicationHeader.c_str(), applicationHeader.size());
executableNetwork.Export(out);
}
{
std::string appHeader(applicationHeader.size(), ' ');
std::fstream inputStream(fileName, std::ios_base::in | std::ios_base::binary);
if (inputStream.fail()) {
FAIL() << "Cannot open file to import model: " << fileName;
}
inputStream.read(&appHeader[0], applicationHeader.size());
ASSERT_EQ(appHeader, applicationHeader);
executableNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
}
}
protected:
// NOTE(review): unconditionally removes the blob — fails if the test was
// skipped before exporting.
void TearDown() override {
if (remove(fileName.c_str()) != 0) {
FAIL() << "Error: could not delete file " << fileName;
}
}
private:
std::string fileName = "exported_model.blob";
};
class ImportExportGNAModelChanged : public ImportExportGNAModelUnchanged {};
TEST_P(ImportExportGNAModelUnchanged, ReshapePermuteConv) {
TestRun(false);
};
TEST_P(ImportExportGNAModelChanged, ReshapePermuteConv) {
TestRun(true);
};
// 336 = 1 * 1 * 168 * 2 — matches the first reshape inside the test model.
const std::vector<std::vector<size_t>> inputShapes = {
{1, 336}
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
const std::vector<std::map<std::string, std::string>> importConfigsChanged = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "32767"}
}
};
const std::vector<std::map<std::string, std::string>> importConfigsUnchanged = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "1"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}
}
};
const std::vector<std::string> appHeaders = {
"",
"APPLICATION_HEADER"
};
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelUnchanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigsUnchanged),
::testing::ValuesIn(appHeaders)),
ImportExportGNAModelUnchanged::getTestCaseName);
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkGNA, ImportExportGNAModelChanged,
::testing::Combine(
::testing::ValuesIn(inputShapes),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigsChanged),
::testing::ValuesIn(appHeaders)),
ImportExportGNAModelChanged::getTestCaseName);
} // namespace

View File

@ -0,0 +1,116 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <sstream>
#include <string>
#include <tuple>
#include <vector>

#include "ngraph/opsets/opset5.hpp"
#include "shared_test_classes/base/layer_test_utils.hpp"
namespace LayerTestsDefinitions {
// Test parameters: input shape, network precision, target device and the
// application header prepended to the exported blob.
// Modernized from `typedef` to a `using` alias (file already uses C++11+).
using exportImportNetworkParams = std::tuple<
        std::vector<size_t>,            // Input Shape
        InferenceEngine::Precision,     // Network Precision
        std::string,                    // Target Device
        std::string                     // Application Header
>;
// Export/import smoke test for a network whose output shape is data-dependent
// (NonZero). Unlike the GNA variant, the round trip happens fully in memory
// via a std::stringstream, with the application header prepended to the blob.
class ImportNonZero : public testing::WithParamInterface<exportImportNetworkParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
protected:
// Builds parameter -> NonZero and stores it as the tested function.
void SetUp() override {
InferenceEngine::Precision netPrecision;
ngraph::Shape inputShape;
std::tie(inputShape, netPrecision, targetDevice, applicationHeader) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, inputShape);
const auto nonZero = std::make_shared<ngraph::opset5::NonZero>(parameter);
function = std::make_shared<ngraph::Function>(nonZero->outputs(), ngraph::ParameterVector{parameter}, "ExportImportNetwork");
functionRefs = ngraph::clone_function(*function);
}
// Exports the compiled network into an in-memory stream (header first),
// verifies the header reads back identically, then re-imports the blob,
// replacing executableNetwork with the imported one.
void exportImportNetwork() {
std::stringstream strm;
strm.write(applicationHeader.c_str(), applicationHeader.size());
executableNetwork.Export(strm);
strm.seekg(0, strm.beg);
std::string appHeader(applicationHeader.size(), ' ');
strm.read(&appHeader[0], applicationHeader.size());
ASSERT_EQ(appHeader, applicationHeader);
executableNetwork = core->ImportNetwork(strm, targetDevice, configuration);
}
// Runs the compiled network against references, performs the export/import
// round trip, then checks the imported network's I/O metadata and outputs
// match the originally compiled network.
void Run() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// NOTE(review): functionRefs was already cloned in SetUp; this second
// clone looks redundant — confirm whether Run can be re-entered with a
// mutated function.
functionRefs = ngraph::clone_function(*function);
// load export configuration and save outputs
LoadNetwork();
GenerateInputs();
Infer();
auto actualOutputs = GetOutputs();
auto referenceOutputs = CalculateRefs();
Compare(referenceOutputs, actualOutputs);
const auto compiledExecNetwork = executableNetwork;
exportImportNetwork();
const auto importedExecNetwork = executableNetwork;
// Re-run inference on the imported network.
GenerateInputs();
Infer();
// Imported and compiled networks must expose identical I/O metadata.
ASSERT_EQ(importedExecNetwork.GetInputsInfo().size(), compiledExecNetwork.GetInputsInfo().size());
ASSERT_EQ(importedExecNetwork.GetOutputsInfo().size(), compiledExecNetwork.GetOutputsInfo().size());
for (const auto& next_input : importedExecNetwork.GetInputsInfo()) {
ASSERT_NO_THROW(compiledExecNetwork.GetInputsInfo()[next_input.first]);
Compare(next_input.second->getTensorDesc(), compiledExecNetwork.GetInputsInfo()[next_input.first]->getTensorDesc());
}
for (const auto& next_output : importedExecNetwork.GetOutputsInfo()) {
ASSERT_NO_THROW(compiledExecNetwork.GetOutputsInfo()[next_output.first]);
}
auto importedOutputs = GetOutputs();
ASSERT_EQ(actualOutputs.size(), importedOutputs.size());
for (size_t i = 0; i < actualOutputs.size(); i++) {
Compare(actualOutputs[i]->getTensorDesc(), importedOutputs[i]->getTensorDesc());
Compare(actualOutputs[i], importedOutputs[i]);
}
}
// Arbitrary bytes written ahead of the exported blob.
std::string applicationHeader;
};
TEST_P(ImportNonZero, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};
// Headers prepended to the exported blob: empty and a non-trivial marker.
const std::vector<std::string> appHeaders = {
"",
"APPLICATION_HEADER"
};
std::vector<size_t> inputShape = ngraph::Shape{1000};
// NOTE(review): no getTestCaseName is passed here (the pre-move version
// supplied one), so gtest falls back to numeric parameter names — confirm
// this is intentional.
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkCase, ImportNonZero,
::testing::Combine(
::testing::Values(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(appHeaders)));
} // namespace

View File

@ -1,41 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_nonzero.hpp"
#include "vpu/private_plugin_config.hpp"
using namespace LayerTestsDefinitions;
namespace {
// Pre-move myriad instantiation (file deleted by the commit). Note the old
// six-field parameterization: it still carried export/import config maps,
// which the relocated test dropped.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{}
};
const std::vector<std::map<std::string, std::string>> importConfigs = {
{}
};
const std::vector<std::string> appHeaders = {
"",
"APPLICATION_HEADER"
};
std::vector<size_t> inputShape = ngraph::Shape{1000};
INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkCase, ImportNonZero,
::testing::Combine(
::testing::Values(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs),
::testing::ValuesIn(appHeaders)),
ImportNonZero::getTestCaseName);
} // namespace

View File

@ -1,16 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "base/import_export_base/import_export_base.hpp"
namespace LayerTestsDefinitions {
// Declaration only: SetUp is defined in the matching shared-test .cpp file
// (both removed by this commit).
class ImportNonZero : public FuncTestUtils::ImportNetworkTestBase {
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@ -1,16 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "base/import_export_base/import_export_base.hpp"
namespace LayerTestsDefinitions {
// Declaration only: SetUp is defined in the matching shared-test .cpp file
// (both removed by this commit).
class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase {
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions

View File

@ -1,27 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_nonzero.hpp"
#include "ngraph/opsets/opset5.hpp"
namespace LayerTestsDefinitions {
// Pre-move shared implementation (file deleted): builds parameter -> NonZero.
// The old six-field tuple included export/import configuration maps.
void ImportNonZero::SetUp() {
InferenceEngine::Precision netPrecision;
ngraph::Shape inputShape;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, inputShape);
const auto nonZero = std::make_shared<ngraph::opset5::NonZero>(parameter);
function = std::make_shared<ngraph::Function>(nonZero->outputs(), ngraph::ParameterVector{parameter}, "ExportImportNetwork");
}
TEST_P(ImportNonZero, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions

View File

@ -1,44 +0,0 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_reshape_permute_conv.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {
// Pre-move shared implementation (file deleted): builds the
// reshape -> transpose -> convolution -> transpose -> reshape graph that the
// relocated GNA test now defines inline.
void ImportReshapePermuteConv::SetUp() {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { inputShape });
// 1x336 input reshaped to 1x1x168x2 ahead of the convolution.
std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
auto permute1 = std::make_shared<ngraph::opset1::Transpose>(reshape1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
ngraph::op::PadType::VALID, 12);
auto permute2 = std::make_shared<ngraph::opset1::Transpose>(conv1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
// Flatten the convolution output back to a 2D 1x1932 result.
std::vector<size_t> outFormShapes2 = { 1, 1932 };
auto pattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(permute2, pattern2, false);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
}
TEST_P(ImportReshapePermuteConv, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions