[VPU][TESTS][GNA] Fix dynamic models import on VPU (#3427)

* [VPU] Fix dynamic networks import

* [IE][GNA][TESTS] Move ImportExport tests from GNA to shared part

* [VPU][Tests] Add ExportImport test for dynamic network

* [VPU] Review fixes

* [VPU][Tests] Review and test fixes

* [VPU][Tests] Move TEST_P to shared part
Andrew Bakalin authored 2020-12-08 15:19:27 +03:00, committed by GitHub
parent 86347bd909
commit e81201ea35
15 changed files with 411 additions and 292 deletions


@@ -0,0 +1,15 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <string>

namespace vpu {

std::string createIOShapeName(std::string srcName);

bool isIOShapeName(std::string name);

}  // namespace vpu


@@ -14,6 +14,7 @@
 #include <vpu/graph_transformer.hpp>
 #include <vpu/backend/blob_format.hpp>
 #include <vpu/model/data.hpp>
+#include <vpu/utils/shape_io.hpp>

 namespace vpu {
@@ -49,101 +50,71 @@ void BlobReader::parse(const std::vector<char>& blob) {
     _inputInfo.totalSize = _blobHeader.inputs_size;
     _outputInfo.totalSize = _blobHeader.outputs_size;

-    auto inputInfoSecOffset = _blobHeader.input_info_section_offset;
+    const auto readIO = [this, &blob](DataInfo& ioInfo, uint32_t& ioSectionOffset, uint32_t idx) {
+        auto ioIdx = readFromBlob<uint32_t>(blob, ioSectionOffset);
+        VPU_THROW_UNLESS(ioIdx == idx, "BlobReader failed on I/O processing, its ioIdx parameter (which is {}) is "
+                                       "different from its processing order (which is {})", ioIdx, idx);
+
+        auto ioBufferOffset = readFromBlob<int32_t>(blob, ioSectionOffset);
+
+        auto nameLength = readFromBlob<uint32_t>(blob, ioSectionOffset);
+        std::string ioName(nameLength, 0);
+        for (auto& c : ioName) {
+            c = readFromBlob<char>(blob, ioSectionOffset);
+        }
+        // Truncate zeros
+        ioName = ioName.c_str();
+
+        auto dataType = readFromBlob<DataType>(blob, ioSectionOffset);
+        auto orderCode = readFromBlob<uint32_t>(blob, ioSectionOffset);
+        auto numDims = readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        auto dimsOrder = DimsOrder::fromCode(orderCode);
+        auto perm = dimsOrder.toPermutation();
+        IE_ASSERT(perm.size() == numDims);
+
+        auto dimsLocation = readFromBlob<Location>(blob, ioSectionOffset);
+        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
+                         "BlobReader error while parsing data {}: only Blob location for input/output shape is supported, but {} was given",
+                         ioName, dimsLocation);
+        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        // Skip strides' location and offset
+        ioSectionOffset += 2 * sizeof(uint32_t);
+
+        DimValues vpuDims;
+        for (const auto& dim : perm) {
+            vpuDims.set(dim, readFromBlob<uint32_t>(blob, dimsOffset));
+        }
+
+        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
+        ie::Data ioData(ioName, ieDesc);
+
+        ioInfo.offset[ioName] = ioBufferOffset;
+        ioInfo.descFromPlugin[ioName] = ieDesc;
+
+        return ioData;
+    };
+
+    auto inputSectionOffset = _blobHeader.input_info_section_offset;
     for (uint32_t i = 0; i < _blobHeader.inputs_count; i++) {
-        auto ioIdx = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-        IE_ASSERT(ioIdx == i);
-
-        auto ioBufferOffset = readFromBlob<int32_t>(blob, inputInfoSecOffset);
-
-        auto nameLength = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-        std::string inputName(nameLength, 0);
-        for (auto& c : inputName) {
-            c = readFromBlob<char>(blob, inputInfoSecOffset);
+        const auto processedInput = readIO(_inputInfo, inputSectionOffset, i);
+        if (!isIOShapeName(processedInput.getName())) {
+            ie::InputInfo input;
+            input.setInputData(std::make_shared<ie::Data>(processedInput));
+            _networkInputs[processedInput.getName()] = std::make_shared<ie::InputInfo>(input);
         }
-        // Truncate zeros
-        inputName = inputName.c_str();
-
-        auto dataType = readFromBlob<DataType>(blob, inputInfoSecOffset);
-        auto orderCode = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-        auto numDims = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        auto dimsOrder = DimsOrder::fromCode(orderCode);
-        auto perm = dimsOrder.toPermutation();
-        IE_ASSERT(perm.size() == numDims);
-
-        auto dimsLocation = readFromBlob<Location>(blob, inputInfoSecOffset);
-        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
-                         "BlobReader error while parsing {} input data: only Blob location for input shape is supported, but {} was given",
-                         inputName, dimsLocation);
-        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        // Skip strides' location and offset
-        inputInfoSecOffset += 2 * sizeof(uint32_t);
-
-        DimValues vpuDims;
-        for (int i = 0; i < perm.size(); ++i) {
-            vpuDims.set(perm[i], readFromBlob<uint32_t>(blob, dimsOffset));
-        }
-
-        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
-        ie::Data inputData(inputName, ieDesc);
-
-        ie::InputInfo input;
-        input.setInputData(std::make_shared<ie::Data>(inputData));
-
-        _networkInputs[input.name()] = std::make_shared<ie::InputInfo>(input);
-        _inputInfo.offset[input.name()] = ioBufferOffset;
     }

-    auto outputInfoSecOffset = _blobHeader.output_info_section_offset;
-    for (size_t i = 0; i < _blobHeader.outputs_count; i++) {
-        auto ioIdx = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-        IE_ASSERT(ioIdx == i);
-
-        auto ioBufferOffset = readFromBlob<int32_t>(blob, outputInfoSecOffset);
-
-        auto nameLength = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-        std::string outputName(nameLength, 0);
-        for (auto& c : outputName) {
-            c = readFromBlob<char>(blob, outputInfoSecOffset);
+    auto outputSectionOffset = _blobHeader.output_info_section_offset;
+    for (uint32_t i = 0; i < _blobHeader.outputs_count; i++) {
+        const auto processedOutput = readIO(_outputInfo, outputSectionOffset, i);
+        if (!isIOShapeName(processedOutput.getName())) {
+            _networkOutputs[processedOutput.getName()] = std::make_shared<ie::Data>(processedOutput);
         }
-        // Truncate zeros
-        outputName = outputName.c_str();
-
-        auto dataType = readFromBlob<DataType>(blob, outputInfoSecOffset);
-        auto orderCode = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-        auto numDims = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        auto dimsOrder = DimsOrder::fromCode(orderCode);
-        auto perm = dimsOrder.toPermutation();
-        IE_ASSERT(perm.size() == numDims);
-
-        auto dimsLocation = readFromBlob<Location>(blob, outputInfoSecOffset);
-        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
-                         "BlobReader error while parsing {} output data: only Blob location for output shape is supported, but {} was given",
-                         outputName, dimsLocation);
-        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        // Skip strides' location and offset
-        outputInfoSecOffset += 2 * sizeof(uint32_t);
-
-        DimValues vpuDims;
-        for (int i = 0; i < perm.size(); ++i) {
-            vpuDims.set(perm[i], readFromBlob<uint32_t>(blob, dimsOffset));
-        }
-
-        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
-        ie::Data outputData(outputName, ieDesc);
-
-        _networkOutputs[outputData.getName()] = std::make_shared<ie::Data>(outputData);
-        _outputInfo.offset[outputData.getName()] = ioBufferOffset;
     }
 }


@@ -4,6 +4,8 @@
 #include "vpu/middleend/pass_manager.hpp"

+#include "vpu/utils/shape_io.hpp"
+
 #include <set>
 #include <memory>
@@ -70,9 +72,7 @@ public:
         model->connectDataWithShape(shape, output);

         if (output->usage() == DataUsage::Output) {
-            // MyriadInferRequest::GetResult assumes that dynamic data object has shape data object
-            // with the same name + suffix "@shape"
-            const auto shapeName = output->name() + "@shape";
+            const auto shapeName = createIOShapeName(output->name());
             const auto& shapeOutput = model->addOutputData(shapeName, shape->desc());

             const auto& shapeProducer = shape->producer();

@@ -3,6 +3,8 @@
 //

 #include <vpu/frontend/frontend.hpp>
+
+#include <vpu/utils/shape_io.hpp>

 #include <ngraph/node.hpp>

 namespace vpu {
@@ -92,7 +94,7 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
     auto shapeDataObject = shape;
     if (dataOutput->usage() == DataUsage::Output && shapeDataObject->usage() != DataUsage::Output) {
-        const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());
+        const auto& shapeOutput = model->addOutputData(createIOShapeName(dataOutput->name()), shape->desc());

         bindData(shapeOutput, shape->origData());
         for (const auto& shapeConsumerEdge : shape->consumerEdges()) {

@@ -0,0 +1,17 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "vpu/utils/shape_io.hpp"

namespace vpu {

std::string createIOShapeName(std::string srcName) {
    return srcName + "@shape";
}

bool isIOShapeName(std::string name) {
    return name.find("@shape") != std::string::npos;
}

}  // namespace vpu
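
The "@shape" suffix these helpers centralize is the contract used throughout this commit: a dynamic I/O data object gets a companion output named `<name>@shape`, and every consumer detects it by that suffix instead of re-spelling the literal. A standalone sketch of the round trip; the `main` driver and the sample name are illustrative, not part of the commit:

#include <cassert>
#include <iostream>
#include <string>

// Mirrors vpu::createIOShapeName / vpu::isIOShapeName introduced above.
std::string createIOShapeName(std::string srcName) {
    return srcName + "@shape";
}

bool isIOShapeName(std::string name) {
    return name.find("@shape") != std::string::npos;
}

int main() {
    const std::string output = "nonzero_output";             // illustrative output name
    const std::string shapeOutput = createIOShapeName(output);

    assert(shapeOutput == "nonzero_output@shape");
    assert(isIOShapeName(shapeOutput));   // companion entries are filtered out of user-visible I/O maps
    assert(!isIOShapeName(output));       // regular outputs pass through untouched

    std::cout << output << " -> " << shapeOutput << std::endl;
    return 0;
}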


@@ -13,6 +13,7 @@
 #include <vpu/utils/perf_report.hpp>
 #include <vpu/utils/ie_helpers.hpp>
 #include <vpu/utils/profiling.hpp>
+#include <vpu/utils/shape_io.hpp>

 #include "myriad_executable_network.h"
 #include "myriad_infer_request.h"
@@ -236,8 +237,7 @@ void MyriadInferRequest::GetResult() {
     auto ieOutDims = ieOutDesc.getDims();

     // Eject dynamic output shape (suffix "@shape") and copy it to vector of dimensions in reverse order
-    const auto& shapeInfo = _outputInfo.offset.find(ieBlobName + "@shape");
+    const auto& shapeInfo = _outputInfo.offset.find(createIOShapeName(ieBlobName));
     // if (isDynamic)
     if (shapeInfo != _outputInfo.offset.end()) {
         auto outData = networkOutputs[ieBlobName];
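
This lookup is the read-side half of the "@shape" contract: if `<name>@shape` has an entry in the output offset map, the blob carries a companion tensor whose elements are the actual dimensions, which GetResult copies into the IE dims vector in reverse order. A simplified sketch of that recovery step; the flat offset map, the `int32_t` element type, the minor-to-major storage assumption, and the `recoverDynamicDims` helper are illustrative, not the plugin's actual API:

#include <cstdint>
#include <cstring>
#include <map>
#include <string>
#include <vector>

// Hypothetical helper: recover the actual dims of a dynamic output from the
// result buffer, given the offset map parsed out of the exported blob.
std::vector<size_t> recoverDynamicDims(const std::map<std::string, int>& outputOffsets,
                                       const std::string& blobName,
                                       const uint8_t* resultBuffer,
                                       size_t rank) {
    const auto shapeEntry = outputOffsets.find(blobName + "@shape");  // same key createIOShapeName builds
    if (shapeEntry == outputOffsets.end()) {
        return {};  // static output: no companion shape tensor to eject
    }
    std::vector<int32_t> raw(rank);
    std::memcpy(raw.data(), resultBuffer + shapeEntry->second, rank * sizeof(int32_t));
    // Assumed minor-to-major storage, so reverse while copying into the dims vector.
    return std::vector<size_t>(raw.rbegin(), raw.rend());
}

int main() {
    // Toy buffer: a rank-2 shape {3, 10} stored minor-to-major as {10, 3}.
    const int32_t stored[] = {10, 3};
    const std::map<std::string, int> offsets = {{"out@shape", 0}};
    const auto dims = recoverDynamicDims(offsets, "out",
                                         reinterpret_cast<const uint8_t*>(stored), 2);
    return (dims.size() == 2 && dims[0] == 3 && dims[1] == 10) ? 0 : 1;
}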


@ -1,196 +0,0 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <fstream>
#include <ie_core.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
typedef std::tuple<
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration
std::map<std::string, std::string> // Import Configuration
> exportImportNetworkParams;
namespace LayerTestsDefinitions {
class ImportNetworkTest : public testing::WithParamInterface<exportImportNetworkParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice << "_";
for (auto const& configItem : exportConfiguration) {
result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
}
for (auto const& configItem : importConfiguration) {
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
return result.str();
}
void Run() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
LoadNetwork();
Infer();
executableNetwork.Export("exported_model.blob");
const auto& actualOutputs = GetOutputs();
auto referenceOutputs = CalculateRefs();
Compare(referenceOutputs, actualOutputs);
for (auto const& configItem : importConfiguration) {
configuration[configItem.first] = configItem.second;
}
std::fstream inputStream("exported_model.blob", std::ios_base::in | std::ios_base::binary);
if (inputStream.fail()) {
FAIL() << "Cannot open file to import model: exported_model.blob";
}
auto importedNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
for (const auto& next_input : importedNetwork.GetInputsInfo()) {
ASSERT_NO_THROW(executableNetwork.GetInputsInfo()[next_input.first]);
}
for (const auto& next_output : importedNetwork.GetOutputsInfo()) {
ASSERT_NO_THROW(executableNetwork.GetOutputsInfo()[next_output.first]);
}
auto importedOutputs = CalculateImportedNetwork(importedNetwork);
Compare(importedOutputs, actualOutputs);
}
protected:
void SetUp() override {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
auto permute1 = std::make_shared<ngraph::opset1::Transpose>(reshape1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
ngraph::op::PadType::VALID, 12);
auto permute2 = std::make_shared<ngraph::opset1::Transpose>(conv1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
std::vector<size_t> outFormShapes2 = { 1, 1932 };
auto pattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(permute2, pattern2, false);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
}
private:
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::vector<std::vector<std::uint8_t>> CalculateImportedNetwork(InferenceEngine::ExecutableNetwork& importedNetwork) {
auto refInferRequest = importedNetwork.CreateInferRequest();
std::vector<InferenceEngine::InputInfo::CPtr> refInfos;
for (const auto& input : importedNetwork.GetInputsInfo()) {
const auto& info = input.second;
refInfos.push_back(info);
}
for (std::size_t i = 0; i < inputs.size(); ++i) {
const auto& input = inputs[i];
const auto& info = refInfos[i];
refInferRequest.SetBlob(info->name(), input);
}
refInferRequest.Infer();
auto refOutputs = std::vector<InferenceEngine::Blob::Ptr>{};
for (const auto& output : importedNetwork.GetOutputsInfo()) {
const auto& name = output.first;
refOutputs.push_back(refInferRequest.GetBlob(name));
}
auto referenceOutputs = std::vector<std::vector<std::uint8_t>>(refOutputs.size());
for (std::size_t i = 0; i < refOutputs.size(); ++i) {
const auto& reference = refOutputs[i];
const auto refSize = reference->byteSize();
auto& expectedOutput = referenceOutputs[i];
expectedOutput.resize(refSize);
auto refMemory = InferenceEngine::as<InferenceEngine::MemoryBlob>(reference);
IE_ASSERT(refMemory);
const auto refLockedMemory = refMemory->wmap();
const auto referenceBuffer = refLockedMemory.as<const std::uint8_t*>();
std::copy(referenceBuffer, referenceBuffer + refSize, expectedOutput.data());
}
return referenceOutputs;
}
};
TEST_P(ImportNetworkTest, CompareWithRefImpl) {
Run();
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
const std::vector<std::map<std::string, std::string>> importConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "32767"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
},
};
INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportNetworkTest,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs)),
ImportNetworkTest::getTestCaseName);
} // namespace LayerTestsDefinitions


@@ -0,0 +1,70 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_reshape_permute_conv.hpp"
#include <fstream>
#include <stdio.h>
using namespace LayerTestsDefinitions;
namespace {
class ImportReshapePermuteConvGNA : public ImportReshapePermuteConv {
private:
void exportImportNetwork() override {
executableNetwork.Export(fileName);
std::fstream inputStream(fileName, std::ios_base::in | std::ios_base::binary);
if (inputStream.fail()) {
FAIL() << "Cannot open file to import model: " << fileName;
}
executableNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
}
protected:
void TearDown() override {
if (remove(fileName.c_str()) != 0) {
FAIL() << "Error: could not delete file " << fileName;
}
}
private:
std::string fileName = "exported_model.blob";
};
TEST_P(ImportReshapePermuteConvGNA, CompareWithRefImpl) {
Run();
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
}
};
const std::vector<std::map<std::string, std::string>> importConfigs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "32767"}
},
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"}
},
};
INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportReshapePermuteConvGNA,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs)),
ImportReshapePermuteConvGNA::getTestCaseName);
} // namespace


@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_nonzero.hpp"
#include "vpu/private_plugin_config.hpp"
using namespace LayerTestsDefinitions;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
};
const std::vector<std::map<std::string, std::string>> exportConfigs = {
{}
};
const std::vector<std::map<std::string, std::string>> importConfigs = {
{}
};
INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportNonZero,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs)),
ImportNonZero::getTestCaseName);
} // namespace


@@ -0,0 +1,16 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "functional_test_utils/import_export_base/import_export_base.hpp"
namespace LayerTestsDefinitions {
class ImportNonZero : public FuncTestUtils::ImportNetworkTestBase {
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,16 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "functional_test_utils/import_export_base/import_export_base.hpp"
namespace LayerTestsDefinitions {
class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase {
protected:
void SetUp() override;
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,26 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_nonzero.hpp"
#include "ngraph/opsets/opset5.hpp"
namespace LayerTestsDefinitions {
void ImportNonZero::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ngraph::Shape{1000});
const auto nonZero = std::make_shared<ngraph::opset5::NonZero>(parameter);
function = std::make_shared<ngraph::Function>(nonZero->outputs(), ngraph::ParameterVector{parameter}, "ExportImportNetwork");
}
TEST_P(ImportNonZero, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,43 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_tests/import_reshape_permute_conv.hpp"
#include "ngraph_functions/builders.hpp"
namespace LayerTestsDefinitions {
void ImportReshapePermuteConv::SetUp() {
InferenceEngine::Precision netPrecision;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
auto permute1 = std::make_shared<ngraph::opset1::Transpose>(reshape1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
ngraph::op::PadType::VALID, 12);
auto permute2 = std::make_shared<ngraph::opset1::Transpose>(conv1,
ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
std::vector<size_t> outFormShapes2 = { 1, 1932 };
auto pattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(permute2, pattern2, false);
ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
}
TEST_P(ImportReshapePermuteConv, CompareWithRefImpl) {
Run();
};
} // namespace LayerTestsDefinitions


@@ -0,0 +1,73 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "import_export_base.hpp"
#include <fstream>
namespace FuncTestUtils {
std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = obj.param;
std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice << "_";
for (auto const& configItem : exportConfiguration) {
result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
}
for (auto const& configItem : importConfiguration) {
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
return result.str();
}
void ImportNetworkTestBase::exportImportNetwork() {
std::stringstream strm;
executableNetwork.Export(strm);
executableNetwork = core->ImportNetwork(strm, targetDevice, configuration);
}
void ImportNetworkTestBase::Run() {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
LoadNetwork();
Infer();
const auto& actualOutputs = GetOutputs();
auto referenceOutputs = CalculateRefs();
Compare(referenceOutputs, actualOutputs);
for (auto const& configItem : importConfiguration) {
configuration[configItem.first] = configItem.second;
}
const auto compiledExecNetwork = executableNetwork;
exportImportNetwork();
const auto importedExecNetwork = executableNetwork;
Infer();
ASSERT_EQ(importedExecNetwork.GetInputsInfo().size(), compiledExecNetwork.GetInputsInfo().size());
ASSERT_EQ(importedExecNetwork.GetOutputsInfo().size(), compiledExecNetwork.GetOutputsInfo().size());
for (const auto& next_input : importedExecNetwork.GetInputsInfo()) {
ASSERT_NO_THROW(compiledExecNetwork.GetInputsInfo()[next_input.first]);
}
for (const auto& next_output : importedExecNetwork.GetOutputsInfo()) {
ASSERT_NO_THROW(compiledExecNetwork.GetOutputsInfo()[next_output.first]);
}
auto importedOutputs = GetOutputs();
ASSERT_EQ(actualOutputs.size(), importedOutputs.size());
for (size_t i = 0; i < actualOutputs.size(); i++) {
Compare(actualOutputs[i], importedOutputs[i]);
}
}
} // namespace FuncTestUtils


@@ -0,0 +1,34 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "functional_test_utils/layer_test_utils.hpp"
#include <ie_core.hpp>
typedef std::tuple<
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration
std::map<std::string, std::string> // Import Configuration
> exportImportNetworkParams;
namespace FuncTestUtils {
class ImportNetworkTestBase : public testing::WithParamInterface<exportImportNetworkParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj);
void Run() override;
protected:
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
private:
virtual void exportImportNetwork();
};
} // namespace FuncTestUtils