From e81201ea35d9f820e4b37d60d3c5222662a8e7c9 Mon Sep 17 00:00:00 2001
From: Andrew Bakalin
Date: Tue, 8 Dec 2020 15:19:27 +0300
Subject: [PATCH] [VPU][TESTS][GNA] Fix dynamic models import on VPU (#3427)

* [VPU] Fix dynamic networks import
* [IE][GNA][TESTS] Move ImportExport tests from GNA to shared part
* [VPU][Tests] Add ExportImport test for dynamic network
* [VPU] Review fixes
* [VPU][Tests] Review and test fixes
* [VPU][Tests] Move TEST_P to shared part
---
 .../include/vpu/utils/shape_io.hpp            |  15 ++
 .../vpu/graph_transformer/src/blob_reader.cpp | 151 ++++++--------
 .../middleend/passes/propagate_dynamism.cpp   |   6 +-
 .../src/stages/dynamic_shape_resolver.cpp     |   4 +-
 .../graph_transformer/src/utils/shape_io.cpp  |  17 ++
 .../myriad_plugin/myriad_infer_request.cpp    |   4 +-
 .../plugin/gna/import_export_network.cpp      | 196 ------------------
 .../import_reshape_permute_conv.cpp           |  70 +++++++
 .../import_export_tests/import_nonzero.cpp    |  32 +++
 .../import_export_tests/import_nonzero.hpp    |  16 ++
 .../import_reshape_permute_conv.hpp           |  16 ++
 .../import_export_tests/import_nonzero.cpp    |  26 +++
 .../import_reshape_permute_conv.cpp           |  43 ++++
 .../import_export_base/import_export_base.cpp |  73 +++++++
 .../import_export_base/import_export_base.hpp |  34 +++
 15 files changed, 411 insertions(+), 292 deletions(-)
 create mode 100644 inference-engine/src/vpu/graph_transformer/include/vpu/utils/shape_io.hpp
 create mode 100644 inference-engine/src/vpu/graph_transformer/src/utils/shape_io.cpp
 delete mode 100644 inference-engine/tests/functional/plugin/gna/import_export_network.cpp
 create mode 100644 inference-engine/tests/functional/plugin/gna/shared_tests_instances/import_export_tests/import_reshape_permute_conv.cpp
 create mode 100644 inference-engine/tests/functional/plugin/myriad/shared_tests_instances/import_export_tests/import_nonzero.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_nonzero.hpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_reshape_permute_conv.hpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_nonzero.cpp
 create mode 100644 inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_reshape_permute_conv.cpp
 create mode 100644 inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.cpp
 create mode 100644 inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.hpp

diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/utils/shape_io.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/utils/shape_io.hpp
new file mode 100644
index 00000000000..e0315feba28
--- /dev/null
+++ b/inference-engine/src/vpu/graph_transformer/include/vpu/utils/shape_io.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+
+namespace vpu {
+
+std::string createIOShapeName(std::string srcName);
+
+bool isIOShapeName(std::string name);
+
+} // namespace vpu
diff --git a/inference-engine/src/vpu/graph_transformer/src/blob_reader.cpp b/inference-engine/src/vpu/graph_transformer/src/blob_reader.cpp
index 80b34c8e3a7..03dc2f94894 100644
--- a/inference-engine/src/vpu/graph_transformer/src/blob_reader.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/blob_reader.cpp
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <vpu/utils/shape_io.hpp>
 
 namespace vpu {
 
@@ -49,101 +50,71 @@ void BlobReader::parse(const std::vector<char>& blob) {
     _inputInfo.totalSize = _blobHeader.inputs_size;
     _outputInfo.totalSize = _blobHeader.outputs_size;
 
-    auto inputInfoSecOffset = _blobHeader.input_info_section_offset;
+    const auto readIO = [this, &blob](DataInfo& ioInfo, uint32_t& ioSectionOffset, uint32_t idx) {
+        auto ioIdx = readFromBlob<uint32_t>(blob, ioSectionOffset);
+        VPU_THROW_UNLESS(ioIdx == idx, "BlobReader failed on I/O processing, its ioIdx parameter (which is {}) is "
+            "different from its processing order (which is {})", ioIdx, idx);
+
+        auto ioBufferOffset = readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        auto nameLength = readFromBlob<uint32_t>(blob, ioSectionOffset);
+        std::string ioName(nameLength, 0);
+        for (auto& c : ioName) {
+            c = readFromBlob<char>(blob, ioSectionOffset);
+        }
+
+        // Truncate zeros
+        ioName = ioName.c_str();
+
+        auto dataType = readFromBlob<DataType>(blob, ioSectionOffset);
+        auto orderCode = readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        auto numDims = readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        auto dimsOrder = DimsOrder::fromCode(orderCode);
+        auto perm = dimsOrder.toPermutation();
+        IE_ASSERT(perm.size() == numDims);
+
+        auto dimsLocation = readFromBlob<Location>(blob, ioSectionOffset);
+        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
+            "BlobReader error while parsing data {}: only Blob location for input/output shape is supported, but {} was given",
+            ioName, dimsLocation);
+        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, ioSectionOffset);
+
+        // Skip strides' location and offset
+        ioSectionOffset += 2 * sizeof(uint32_t);
+
+        DimValues vpuDims;
+
+        for (const auto& dim : perm) {
+            vpuDims.set(dim, readFromBlob<uint32_t>(blob, dimsOffset));
+        }
+
+        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
+        ie::Data ioData(ioName, ieDesc);
+
+        ioInfo.offset[ioName] = ioBufferOffset;
+        ioInfo.descFromPlugin[ioName] = ieDesc;
+
+        return ioData;
+    };
+
+    auto inputSectionOffset = _blobHeader.input_info_section_offset;
     for (uint32_t i = 0; i < _blobHeader.inputs_count; i++) {
-        auto ioIdx = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-        IE_ASSERT(ioIdx == i);
-
-        auto ioBufferOffset = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        auto nameLength = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-        std::string inputName(nameLength, 0);
-        for (auto& c : inputName) {
-            c = readFromBlob<char>(blob, inputInfoSecOffset);
+        const auto processedInput = readIO(_inputInfo, inputSectionOffset, i);
+        if (!isIOShapeName(processedInput.getName())) {
+            ie::InputInfo input;
+            input.setInputData(std::make_shared<ie::Data>(processedInput));
+            _networkInputs[processedInput.getName()] = std::make_shared<ie::InputInfo>(input);
         }
-
-        // Truncate zeros
-        inputName = inputName.c_str();
-
-        auto dataType = readFromBlob<DataType>(blob, inputInfoSecOffset);
-        auto orderCode = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        auto numDims = readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        auto dimsOrder = DimsOrder::fromCode(orderCode);
-        auto perm = dimsOrder.toPermutation();
-        IE_ASSERT(perm.size() == numDims);
-
-        auto dimsLocation = readFromBlob<Location>(blob, inputInfoSecOffset);
-        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
-            "BlobReader error while parsing {} input data: only Blob location for input shape is supported, but {} was given",
-            inputName, dimsLocation);
-        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, inputInfoSecOffset);
-
-        // Skip strides' location and offset
-        inputInfoSecOffset += 2 * sizeof(uint32_t);
-
-        DimValues vpuDims;
-
-        for (int i = 0; i < perm.size(); ++i) {
-            vpuDims.set(perm[i], readFromBlob<uint32_t>(blob, dimsOffset));
-        }
-
-        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
-        ie::Data inputData(inputName, ieDesc);
-
-        ie::InputInfo input;
-        input.setInputData(std::make_shared<ie::Data>(inputData));
-
-        _networkInputs[input.name()] = std::make_shared<ie::InputInfo>(input);
-        _inputInfo.offset[input.name()] = ioBufferOffset;
     }
 
-    auto outputInfoSecOffset = _blobHeader.output_info_section_offset;
-    for (size_t i = 0; i < _blobHeader.outputs_count; i++) {
-        auto ioIdx = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-        IE_ASSERT(ioIdx == i);
-
-        auto ioBufferOffset = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        auto nameLength = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-        std::string outputName(nameLength, 0);
-        for (auto& c : outputName) {
-            c = readFromBlob<char>(blob, outputInfoSecOffset);
+    auto outputSectionOffset = _blobHeader.output_info_section_offset;
+    for (uint32_t i = 0; i < _blobHeader.outputs_count; i++) {
+        const auto processedOutput = readIO(_outputInfo, outputSectionOffset, i);
+        if (!isIOShapeName(processedOutput.getName())) {
+            _networkOutputs[processedOutput.getName()] = std::make_shared<ie::Data>(processedOutput);
         }
-
-        // Truncate zeros
-        outputName = outputName.c_str();
-
-        auto dataType = readFromBlob<DataType>(blob, outputInfoSecOffset);
-        auto orderCode = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        auto numDims = readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        auto dimsOrder = DimsOrder::fromCode(orderCode);
-        auto perm = dimsOrder.toPermutation();
-        IE_ASSERT(perm.size() == numDims);
-
-        auto dimsLocation = readFromBlob<Location>(blob, outputInfoSecOffset);
-        VPU_THROW_UNLESS(dimsLocation == Location::Blob,
-            "BlobReader error while parsing {} output data: only Blob location for output shape is supported, but {} was given",
-            outputName, dimsLocation);
-        auto dimsOffset = _blobHeader.const_data_section_offset + readFromBlob<uint32_t>(blob, outputInfoSecOffset);
-
-        // Skip strides' location and offset
-        outputInfoSecOffset += 2 * sizeof(uint32_t);
-
-        DimValues vpuDims;
-
-        for (int i = 0; i < perm.size(); ++i) {
-            vpuDims.set(perm[i], readFromBlob<uint32_t>(blob, dimsOffset));
-        }
-
-        ie::TensorDesc ieDesc = DataDesc(dataType, dimsOrder, vpuDims).toTensorDesc();
-        ie::Data outputData(outputName, ieDesc);
-
-        _networkOutputs[outputData.getName()] = std::make_shared<ie::Data>(outputData);
-        _outputInfo.offset[outputData.getName()] = ioBufferOffset;
     }
 }
 
diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp
index 50385531676..406c1d052bd 100644
--- a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp
@@ -4,6 +4,8 @@
 
 #include "vpu/middleend/pass_manager.hpp"
 
+#include "vpu/utils/shape_io.hpp"
+
 #include
 #include
 
@@ -70,9 +72,7 @@ public:
         model->connectDataWithShape(shape, output);
 
         if (output->usage() == DataUsage::Output) {
-            // MyriadInferRequest::GetResult assumes that dynamic data object has shape data object
-            // with the same name + suffix "@shape"
-            const auto shapeName = output->name() + "@shape";
+            const auto shapeName = createIOShapeName(output->name());
             const auto& shapeOutput = model->addOutputData(shapeName, shape->desc());
 
             const auto& shapeProducer = shape->producer();
diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp
index 5c6283c327e..03bdeccf949 100644
--- a/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp
@@ -3,6 +3,8 @@
 //
 
 #include
+#include <vpu/utils/shape_io.hpp>
+
 #include
 
 namespace vpu {
@@ -92,7 +94,7 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
     auto shapeDataObject = shape;
     if (dataOutput->usage() == DataUsage::Output && shapeDataObject->usage() != DataUsage::Output) {
-        const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());
+        const auto& shapeOutput = model->addOutputData(createIOShapeName(dataOutput->name()), shape->desc());
 
         bindData(shapeOutput, shape->origData());
         for (const auto& shapeConsumerEdge : shape->consumerEdges()) {
diff --git a/inference-engine/src/vpu/graph_transformer/src/utils/shape_io.cpp b/inference-engine/src/vpu/graph_transformer/src/utils/shape_io.cpp
new file mode 100644
index 00000000000..19725e5d7b6
--- /dev/null
+++ b/inference-engine/src/vpu/graph_transformer/src/utils/shape_io.cpp
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/utils/shape_io.hpp"
+
+namespace vpu {
+
+std::string createIOShapeName(std::string srcName) {
+    return srcName + "@shape";
+}
+
+bool isIOShapeName(std::string name) {
+    return name.find("@shape") != std::string::npos;
+}
+
+} // namespace vpu
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp
index 061a910808b..c36322c66aa 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <vpu/utils/shape_io.hpp>
 
 #include "myriad_executable_network.h"
 #include "myriad_infer_request.h"
@@ -236,8 +237,7 @@ void MyriadInferRequest::GetResult() {
 
     auto ieOutDims = ieOutDesc.getDims();
 
-    // Eject dynamic output shape (suffix "@shape") and copy it to vector of dimensions in reverse order
-    const auto& shapeInfo = _outputInfo.offset.find(ieBlobName + "@shape");
+    const auto& shapeInfo = _outputInfo.offset.find(createIOShapeName(ieBlobName));
     // if (isDynamic)
     if (shapeInfo != _outputInfo.offset.end()) {
         auto outData = networkOutputs[ieBlobName];
diff --git a/inference-engine/tests/functional/plugin/gna/import_export_network.cpp b/inference-engine/tests/functional/plugin/gna/import_export_network.cpp
deleted file mode 100644
index 9c0313ffef7..00000000000
--- a/inference-engine/tests/functional/plugin/gna/import_export_network.cpp
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include "common_test_utils/common_utils.hpp"
-#include "functional_test_utils/plugin_cache.hpp"
-#include "functional_test_utils/layer_test_utils.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "ngraph_functions/utils/ngraph_helpers.hpp"
-#include "ngraph_functions/builders.hpp"
-
-#include "ngraph_functions/pass/convert_prc.hpp"
-
-typedef std::tuple<
-    InferenceEngine::Precision,          // Network Precision
-    std::string,                         // Target Device
-    std::map<std::string, std::string>,  // Export Configuration
-    std::map<std::string, std::string>   // Import Configuration
-> exportImportNetworkParams;
-
-namespace LayerTestsDefinitions {
-
-class ImportNetworkTest : public testing::WithParamInterface<exportImportNetworkParams>,
-                          public LayerTestsUtils::LayerTestsCommon {
- public:
-    static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
-        InferenceEngine::Precision netPrecision;
-        std::string targetDevice;
-        std::map<std::string, std::string> exportConfiguration;
-        std::map<std::string, std::string> importConfiguration;
-        std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = obj.param;
-
-        std::ostringstream result;
-        result << "netPRC=" << netPrecision.name() << "_";
-        result << "targetDevice=" << targetDevice << "_";
-        for (auto const& configItem : exportConfiguration) {
-            result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
-        }
-        for (auto const& configItem : importConfiguration) {
-            result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
-        }
-        return result.str();
-    }
-
-    void Run() override {
-        SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
-        configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
-        LoadNetwork();
-        Infer();
-        executableNetwork.Export("exported_model.blob");
-
-        const auto& actualOutputs = GetOutputs();
-        auto referenceOutputs = CalculateRefs();
-        Compare(referenceOutputs, actualOutputs);
-
-        for (auto const& configItem : importConfiguration) {
-            configuration[configItem.first] = configItem.second;
-        }
-        std::fstream inputStream("exported_model.blob", std::ios_base::in | std::ios_base::binary);
-        if (inputStream.fail()) {
-            FAIL() << "Cannot open file to import model: exported_model.blob";
-        }
-        auto importedNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
-        for (const auto& next_input : importedNetwork.GetInputsInfo()) {
-            ASSERT_NO_THROW(executableNetwork.GetInputsInfo()[next_input.first]);
-        }
-        for (const auto& next_output : importedNetwork.GetOutputsInfo()) {
-            ASSERT_NO_THROW(executableNetwork.GetOutputsInfo()[next_output.first]);
-        }
-        auto importedOutputs = CalculateImportedNetwork(importedNetwork);
-        Compare(importedOutputs, actualOutputs);
-    }
-
- protected:
-    void SetUp() override {
-        InferenceEngine::Precision netPrecision;
-        std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-
-        auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
-
-        std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
-        auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
-        auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
-
-        auto permute1 = std::make_shared<ngraph::opset1::Transpose>(reshape1,
-            ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
-
-        auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
-            ngraph::op::PadType::VALID, 12);
-
-        auto permute2 = std::make_shared<ngraph::opset1::Transpose>(conv1,
-            ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
-
-        std::vector<size_t> outFormShapes2 = { 1, 1932 };
-        auto pattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
-        auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(permute2, pattern2, false);
-
-        ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
-        function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
-    }
-
- private:
-    std::map<std::string, std::string> exportConfiguration;
-    std::map<std::string, std::string> importConfiguration;
-
-    std::vector<std::vector<std::uint8_t>> CalculateImportedNetwork(InferenceEngine::ExecutableNetwork& importedNetwork) {
-        auto refInferRequest = importedNetwork.CreateInferRequest();
-        std::vector<InferenceEngine::InputInfo::CPtr> refInfos;
-        for (const auto& input : importedNetwork.GetInputsInfo()) {
-            const auto& info = input.second;
-            refInfos.push_back(info);
-        }
-
-        for (std::size_t i = 0; i < inputs.size(); ++i) {
-            const auto& input = inputs[i];
-            const auto& info = refInfos[i];
-
-            refInferRequest.SetBlob(info->name(), input);
-        }
-
-        refInferRequest.Infer();
-
-        auto refOutputs = std::vector<InferenceEngine::Blob::Ptr>{};
-        for (const auto& output : importedNetwork.GetOutputsInfo()) {
-            const auto& name = output.first;
-            refOutputs.push_back(refInferRequest.GetBlob(name));
-        }
-
-        auto referenceOutputs = std::vector<std::vector<std::uint8_t>>(refOutputs.size());
-        for (std::size_t i = 0; i < refOutputs.size(); ++i) {
-            const auto& reference = refOutputs[i];
-            const auto refSize = reference->byteSize();
-
-            auto& expectedOutput = referenceOutputs[i];
-            expectedOutput.resize(refSize);
-
-            auto refMemory = InferenceEngine::as<InferenceEngine::MemoryBlob>(reference);
-            IE_ASSERT(refMemory);
-            const auto refLockedMemory = refMemory->wmap();
-            const auto referenceBuffer = refLockedMemory.as<const std::uint8_t*>();
-
-            std::copy(referenceBuffer, referenceBuffer + refSize, expectedOutput.data());
-        }
-
-        return referenceOutputs;
-    }
-};
-
-    TEST_P(ImportNetworkTest, CompareWithRefImpl) {
-        Run();
-    };
-
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> exportConfigs = {
-        {
-            {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
-            {"GNA_SCALE_FACTOR_0", "327.67"}
-        }
-    };
-
-    const std::vector<std::map<std::string, std::string>> importConfigs = {
-        {
-            {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
-            {"GNA_SCALE_FACTOR_0", "32767"}
-        },
-        {
-            {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
-            {"GNA_SCALE_FACTOR_0", "327.67"}
-        },
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportNetworkTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CommonTestUtils::DEVICE_GNA),
-            ::testing::ValuesIn(exportConfigs),
-            ::testing::ValuesIn(importConfigs)),
-        ImportNetworkTest::getTestCaseName);
-
-} // namespace LayerTestsDefinitions
-
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/import_export_tests/import_reshape_permute_conv.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/import_export_tests/import_reshape_permute_conv.cpp
new file mode 100644
index 00000000000..27f92bd88d0
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/import_export_tests/import_reshape_permute_conv.cpp
@@ -0,0 +1,70 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "import_export_tests/import_reshape_permute_conv.hpp"
+
+#include
+#include
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+class ImportReshapePermuteConvGNA : public ImportReshapePermuteConv {
+private:
+    void exportImportNetwork() override {
+        executableNetwork.Export(fileName);
+        std::fstream inputStream(fileName, std::ios_base::in | std::ios_base::binary);
+        if (inputStream.fail()) {
+            FAIL() << "Cannot open file to import model: " << fileName;
+        }
+        executableNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);
+    }
+
+protected:
+    void TearDown() override {
+        if (remove(fileName.c_str()) != 0) {
+            FAIL() << "Error: could not delete file " << fileName;
+        }
+    }
+
+private:
+    std::string fileName = "exported_model.blob";
+};
+
+TEST_P(ImportReshapePermuteConvGNA, CompareWithRefImpl) {
+    Run();
+};
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> exportConfigs = {
+    {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_SCALE_FACTOR_0", "327.67"} + } +}; + +const std::vector> importConfigs = { + { + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "32767"} + }, + { + {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, + {"GNA_SCALE_FACTOR_0", "327.67"} + }, +}; + +INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportReshapePermuteConvGNA, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_GNA), + ::testing::ValuesIn(exportConfigs), + ::testing::ValuesIn(importConfigs)), + ImportReshapePermuteConvGNA::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/import_export_tests/import_nonzero.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/import_export_tests/import_nonzero.cpp new file mode 100644 index 00000000000..e863bb62e72 --- /dev/null +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/import_export_tests/import_nonzero.cpp @@ -0,0 +1,32 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "import_export_tests/import_nonzero.hpp" +#include "vpu/private_plugin_config.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32, +}; + +const std::vector> exportConfigs = { + {} +}; + +const std::vector> importConfigs = { + {} +}; + +INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportNonZero, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_MYRIAD), + ::testing::ValuesIn(exportConfigs), + ::testing::ValuesIn(importConfigs)), + ImportNonZero::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_nonzero.hpp b/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_nonzero.hpp new file mode 100644 index 00000000000..58d863f3b88 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_nonzero.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "functional_test_utils/import_export_base/import_export_base.hpp" + +namespace LayerTestsDefinitions { + +class ImportNonZero : public FuncTestUtils::ImportNetworkTestBase { +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_reshape_permute_conv.hpp b/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_reshape_permute_conv.hpp new file mode 100644 index 00000000000..e08d4e5dcc5 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/import_export_tests/import_reshape_permute_conv.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "functional_test_utils/import_export_base/import_export_base.hpp" + +namespace LayerTestsDefinitions { + +class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase { +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions diff --git a/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_nonzero.cpp b/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_nonzero.cpp new file mode 100644 index 
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_nonzero.cpp
@@ -0,0 +1,26 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "import_export_tests/import_nonzero.hpp"
+
+#include "ngraph/opsets/opset5.hpp"
+
+namespace LayerTestsDefinitions {
+
+void ImportNonZero::SetUp() {
+    InferenceEngine::Precision netPrecision;
+    std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
+    const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ngraph::Shape{1000});
+    const auto nonZero = std::make_shared<ngraph::opset5::NonZero>(parameter);
+
+    function = std::make_shared<ngraph::Function>(nonZero->outputs(), ngraph::ParameterVector{parameter}, "ExportImportNetwork");
+}
+
+TEST_P(ImportNonZero, CompareWithRefImpl) {
+    Run();
+};
+
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_reshape_permute_conv.cpp b/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_reshape_permute_conv.cpp
new file mode 100644
index 00000000000..8fa48a4bdc2
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/src/import_export_tests/import_reshape_permute_conv.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "import_export_tests/import_reshape_permute_conv.hpp"
+
+#include "ngraph_functions/builders.hpp"
+
+namespace LayerTestsDefinitions {
+
+void ImportReshapePermuteConv::SetUp() {
+    InferenceEngine::Precision netPrecision;
+    std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
+
+    std::vector<size_t> outFormShapes1 = { 1, 1, 168, 2 };
+    auto pattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
+    auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
+
+    auto permute1 = std::make_shared<ngraph::opset1::Transpose>(reshape1,
+        ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
+
+    auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
+        ngraph::op::PadType::VALID, 12);
+
+    auto permute2 = std::make_shared<ngraph::opset1::Transpose>(conv1,
+        ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
+
+    std::vector<size_t> outFormShapes2 = { 1, 1932 };
+    auto pattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
+    auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(permute2, pattern2, false);
+
+    ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(reshape2) };
+    function = std::make_shared<ngraph::Function>(results, params, "ExportImportNetwork");
+}
+
+TEST_P(ImportReshapePermuteConv, CompareWithRefImpl) {
+    Run();
+};
+
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.cpp
new file mode 100644
index 00000000000..af17eb82d1c
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.cpp
@@ -0,0 +1,73 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "import_export_base.hpp"
+
+#include
+
+namespace FuncTestUtils {
+
+std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
+    InferenceEngine::Precision netPrecision;
+    std::string targetDevice;
+    std::map<std::string, std::string> exportConfiguration;
+    std::map<std::string, std::string> importConfiguration;
+    std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration) = obj.param;
+
+    std::ostringstream result;
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice << "_";
+    for (auto const& configItem : exportConfiguration) {
+        result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
+    }
+    for (auto const& configItem : importConfiguration) {
+        result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
+    }
+    return result.str();
+}
+
+void ImportNetworkTestBase::exportImportNetwork() {
+    std::stringstream strm;
+    executableNetwork.Export(strm);
+    executableNetwork = core->ImportNetwork(strm, targetDevice, configuration);
+}
+
+void ImportNetworkTestBase::Run() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
+    LoadNetwork();
+    Infer();
+
+    const auto& actualOutputs = GetOutputs();
+    auto referenceOutputs = CalculateRefs();
+    Compare(referenceOutputs, actualOutputs);
+
+    for (auto const& configItem : importConfiguration) {
+        configuration[configItem.first] = configItem.second;
+    }
+
+    const auto compiledExecNetwork = executableNetwork;
+    exportImportNetwork();
+    const auto importedExecNetwork = executableNetwork;
+
+    Infer();
+
+    ASSERT_EQ(importedExecNetwork.GetInputsInfo().size(), compiledExecNetwork.GetInputsInfo().size());
+    ASSERT_EQ(importedExecNetwork.GetOutputsInfo().size(), compiledExecNetwork.GetOutputsInfo().size());
+
+    for (const auto& next_input : importedExecNetwork.GetInputsInfo()) {
+        ASSERT_NO_THROW(compiledExecNetwork.GetInputsInfo()[next_input.first]);
+    }
+    for (const auto& next_output : importedExecNetwork.GetOutputsInfo()) {
+        ASSERT_NO_THROW(compiledExecNetwork.GetOutputsInfo()[next_output.first]);
+    }
+    auto importedOutputs = GetOutputs();
+    ASSERT_EQ(actualOutputs.size(), importedOutputs.size());
+    for (size_t i = 0; i < actualOutputs.size(); i++) {
+        Compare(actualOutputs[i], importedOutputs[i]);
+    }
+}
+
+} // namespace FuncTestUtils
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.hpp
new file mode 100644
index 00000000000..4bb5da58bae
--- /dev/null
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/import_export_base/import_export_base.hpp
@@ -0,0 +1,34 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include
+
+typedef std::tuple<
+    InferenceEngine::Precision,          // Network Precision
+    std::string,                         // Target Device
+    std::map<std::string, std::string>,  // Export Configuration
+    std::map<std::string, std::string>   // Import Configuration
+> exportImportNetworkParams;
+
+namespace FuncTestUtils {
+
+class ImportNetworkTestBase : public testing::WithParamInterface<exportImportNetworkParams>,
+                              public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj);
+    void Run() override;
+
+protected:
+    std::map<std::string, std::string> exportConfiguration;
+    std::map<std::string, std::string> importConfiguration;
+
+private:
+    virtual void exportImportNetwork();
+};
+
+} // namespace FuncTestUtils
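
Editor's note: the fix hinges on the "@shape" companion-output convention shared by the three touched components: the propagate_dynamism pass and parseDSR emit a second network output named <data>@shape, BlobReader::parse() filters such names out of the visible I/O maps on import, and MyriadInferRequest::GetResult() looks the companion up by name to recover the real dims. Below is a minimal standalone sketch of that convention, duplicating the two helpers from vpu/utils/shape_io.hpp so it compiles on its own; the main() harness is illustrative and not part of the patch.

#include <cassert>
#include <string>

// Copies of vpu::createIOShapeName() / vpu::isIOShapeName() from the patch.
std::string createIOShapeName(std::string srcName) {
    return srcName + "@shape";
}

bool isIOShapeName(std::string name) {
    return name.find("@shape") != std::string::npos;
}

int main() {
    // A dynamic output "data" gets a companion output "data@shape" carrying
    // its actual dims; MyriadInferRequest::GetResult() finds it by this name.
    assert(createIOShapeName("data") == "data@shape");

    // BlobReader::parse() uses this predicate to keep companion outputs out
    // of the network's visible input/output maps after ImportNetwork().
    assert(isIOShapeName("data@shape"));
    assert(!isIOShapeName("data"));
    return 0;
}

Note that isIOShapeName() matches the suffix anywhere in the name, so a user tensor whose name happens to contain "@shape" would also be filtered; the patch accepts that trade-off in exchange for a simple name-based pairing that survives export and import.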