[LPT] [CPU] DepthToSpace transformation (#663)

* [LPT] [TEST] LayerTransformation generalization

* [LPT] DequantizationDetails extending

* [LPT] DepthToSpace transformation implementation
This commit is contained in:
Edward Shogulin
2020-06-07 21:12:52 +03:00
committed by GitHub
parent 93b60cacfa
commit 63ee9f8916
19 changed files with 503 additions and 45 deletions

View File

@@ -32,6 +32,19 @@ public:
return channelsCount;
}
// Returns true when the stored dequantization can be represented as a single
// per-tensor operation (all scales identical and all shifts identical).
bool isPerTensor() const {
    return isPerTensor(scales, shifts);
}
// Returns true when every scale equals scales[0] and every shift equals shifts[0].
// Exact float comparison is intentional here: the values must be bit-identical
// for the dequantization to fold into one per-tensor scalar pair.
// Throws when either vector is empty.
static bool isPerTensor(const std::vector<float>& scales, const std::vector<float>& shifts) {
    if ((scales.size() == 0) || (shifts.size() == 0)) {
        THROW_IE_EXCEPTION << "scale or shift values count is not correct";
    }
    return
        std::all_of(scales.begin(), scales.end(), [&](const float value) { return value == scales[0]; }) &&
        std::all_of(shifts.begin(), shifts.end(), [&](const float value) { return value == shifts[0]; });
}
const std::vector<float> scales;
const std::vector<float> shifts;
const size_t channelsCount;

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include "ie_layers.h"
#include "low_precision_transformations/transformation_context.hpp"
#include "low_precision_transformations/transparent_base_transformation.hpp"
namespace InferenceEngine {
namespace details {
IE_SUPPRESS_DEPRECATED_START
// Low precision transformation for the DepthToSpace layer: moves the parent
// dequantization (ScaleShift) through the layer, which only rearranges data
// and therefore preserves the quantized precision.
class INFERENCE_ENGINE_API_CLASS(DepthToSpaceTransformation) : public TransparentBaseTransformation {
public:
    // explicit: a Params value must not silently convert into a transformation
    explicit DepthToSpaceTransformation(const Params& params) : TransparentBaseTransformation(params) {}
    ~DepthToSpaceTransformation() override = default;
    // Propagates the parent dequantization ScaleShift after the DepthToSpace layer.
    void transform(TransformationContext& context, CNNLayer& layer) const override;
    // DepthToSpace performs no arithmetic, so quantized precision is preserved.
    bool isPrecisionPreserved(const CNNLayer& layer) const noexcept override;
    // Requires a single ScaleShift parent; per-tensor values when channels change.
    bool canBeTransformed(const TransformationContext& context, const CNNLayer& layer) const override;
};
IE_SUPPRESS_DEPRECATED_END
} // namespace details
} // namespace InferenceEngine

View File

@@ -0,0 +1,71 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "low_precision_transformations/depth_to_space.hpp"
#include <algorithm>
#include <details/caseless.hpp>
#include <memory>
#include <string>
#include <vector>
#include "low_precision_transformations/common/ie_lpt_exception.hpp"
#include "low_precision_transformations/network_helper.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::details;
// Moves the dequantization ScaleShift through a DepthToSpace layer.
// Layers that do not satisfy canBeTransformed are silently skipped; a layer
// that passes the check but has an unexpected input count or type is a
// programming error and throws.
void DepthToSpaceTransformation::transform(TransformationContext& context, CNNLayer& layer) const {
    if (!canBeTransformed(context, layer)) {
        return;
    }

    const size_t inputsCount = layer.insData.size();
    if ((inputsCount == 0) || (inputsCount > 2)) {
        THROW_IE_EXCEPTION << "layer inputs '" << inputsCount << "' is not correct";
    }

    if (!CaselessEq<std::string>()(layer.type, "DepthToSpace")) {
        THROW_IE_EXCEPTION << "layer '" << layer.name << "' is not correct";
    }

    // the actual ScaleShift move is shared with other transparent layers
    TransparentBaseTransformation::transform(context, layer);
}
// DepthToSpace only permutes tensor elements (no arithmetic), so the quantized
// precision of the input is always kept on the output.
bool DepthToSpaceTransformation::isPrecisionPreserved(const CNNLayer& layer) const noexcept {
    return true;
}
bool DepthToSpaceTransformation::canBeTransformed(const TransformationContext& context, const CNNLayer& layer) const {
if (!TransparentBaseTransformation::canBeTransformed(context, layer)) {
return false;
}
const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(layer);
if (parents.size() != 1) {
return false;
}
if (parents[0]->type != "ScaleShift") {
return false;
}
const std::vector<size_t> inputDims = parents[0]->outData[0]->getDims();
if (inputDims.size() < 3) {
return false;
}
const size_t inputChannels = CNNNetworkHelper::getInputChannelsCount(layer);
const size_t outputChannels = CNNNetworkHelper::getOutputChannelsCount(layer);
if (inputChannels != outputChannels) {
std::vector<float> scales;
std::vector<float> shifts;
fillFromDequantizationLayer(*parents[0], scales, shifts);
if (!DequantizationDetails::isPerTensor(scales, shifts)) {
return false;
}
}
return true;
}

View File

@@ -27,6 +27,7 @@
#include "low_precision_transformations/concat_multi_channels.hpp"
#include "low_precision_transformations/const.hpp"
#include "low_precision_transformations/convolution.hpp"
#include "low_precision_transformations/depth_to_space.hpp"
#include "low_precision_transformations/fake_quantize.hpp"
#include "low_precision_transformations/fully_connected.hpp"
#include "low_precision_transformations/fuse_fake_quantize_and_scale_shift.hpp"
@@ -188,7 +189,8 @@ LowPrecisionTransformations LowPrecisionTransformer::getAllTransformations(const
{ "MVN", LayerTransformationPtr(new MvnTransformation(params)) },
{ "Eltwise", LayerTransformationPtr(new EltwiseTransformation(params)) },
{ "Resample", LayerTransformationPtr(new ResampleTransformation(params)) },
{ "Power", LayerTransformationPtr(new PowerTransformation(params)) }
{ "Power", LayerTransformationPtr(new PowerTransformation(params)) },
{ "DepthToSpace", LayerTransformationPtr(new DepthToSpaceTransformation(params)) }
}),
std::map<std::string, LayerTransformationPtr>({
{ "FakeQuantize", LayerTransformationPtr(new FuseFakeQuantizeAndScaleShiftTransformation(params)) },

View File

@@ -3,13 +3,15 @@
//
#include "low_precision_transformations/transparent_base_transformation.hpp"
#include "low_precision_transformations/network_helper.hpp"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "low_precision_transformations/common/ie_lpt_exception.hpp"
#include "low_precision_transformations/network_helper.hpp"
using namespace InferenceEngine;
using namespace InferenceEngine::details;
@@ -29,13 +31,20 @@ void TransparentBaseTransformation::transform(TransformationContext& context, CN
CNNNetworkHelper::setOutDataPrecision(layer, getPrecisionBeforeParentDequantizationScaleShift(layer));
}
const Blob::Ptr weights_blob = CNNNetworkHelper::getBlob(scaleShift, "weights");
auto weights = CNNNetworkHelper::getFloatData(weights_blob);
const std::vector<float> scales = std::vector<float>(weights.get(), weights.get() + weights_blob->size());
std::vector<float> scales;
std::vector<float> shifts;
fillFromDequantizationLayer(*scaleShift, scales, shifts);
const Blob::Ptr biases_blob = CNNNetworkHelper::getBlob(scaleShift, "biases");
auto biases = CNNNetworkHelper::getFloatData(biases_blob);
const std::vector<float> shifts = std::vector<float>(biases.get(), biases.get() + biases_blob->size());
const size_t inputChannels = CNNNetworkHelper::getInputChannelsCount(layer);
const size_t outputChannels = CNNNetworkHelper::getOutputChannelsCount(layer);
if (inputChannels != outputChannels) {
if (!DequantizationDetails::isPerTensor(scales, shifts)) {
THROW_IE_LPT_EXCEPTION(layer) << "layer input and output channels count values are different for by channel quantization";
}
scales = std::vector<float>(outputChannels, scales[0]);
shifts = std::vector<float>(outputChannels, shifts[0]);
}
CNNNetworkHelper::removeLayer(context.network, scaleShift);
context.removeLayer(*scaleShift);

View File

@@ -25,7 +25,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = {
INSTANTIATE_TEST_CASE_P(LPT, ConcatTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 1024, 16, 16 })),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(trasformationParamValues)),
ConcatTransformation::getTestCaseName);

View File

@@ -28,7 +28,7 @@ const std::vector<bool> multiChannelValues = { /*true,*/ false };
INSTANTIATE_TEST_CASE_P(LPT, ConcatWithIntermediateTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 1024, 16, 16 })),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(trasformationParamValues),
::testing::ValuesIn(transparentIntermediateValues),

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "low_precision_transformations/depth_to_space_transformation.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace InferenceEngine::details;
namespace {
// network precisions the CPU plugin is exercised with
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};

// LPT parameter sets; identifier typo fixed ("trasformation" -> "transformation")
const std::vector<LayerTransformation::Params> transformationParamValues = {
    LayerTestsUtils::LayerTransformationParamsFactory::createParams(),
    LayerTestsUtils::LayerTransformationParamsFactory::createParamsI8I8(),
    LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8()
};

INSTANTIATE_TEST_CASE_P(LPT, DepthToSpaceTransformation,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(InferenceEngine::SizeVector({ 1, 32, 72, 48 })),
        ::testing::Values(CommonTestUtils::DEVICE_CPU),
        ::testing::ValuesIn(transformationParamValues)),
    DepthToSpaceTransformation::getTestCaseName);
}  // namespace

View File

@@ -8,16 +8,31 @@
#include <string>
#include <ie_core.hpp>
#include "generic_ie.hpp"
#include <net_pass.h>
#include "graph_transformer.h"
#include "convert_function_to_cnn_network.hpp"
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
#include <transformations/convert_opset3_to_opset2/convert_opset3_to_opset2.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset2.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/op/fused/gelu.hpp>
#include "ngraph_functions/pass/convert_prc.hpp"
#include "common_test_utils/common_utils.hpp"
#include "ie_util_internal.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ie_util_internal.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp"
#include "low_precision_transformations/transformer.hpp"
#include "low_precision_transformations/convolution.hpp"
#include "low_precision_transformations/scaleshift_to_convolution.hpp"
@@ -34,6 +49,66 @@ InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLo
"ScaleShift");
}
// Clones the ngraph-based test function, lowers it to the legacy CNNNetwork
// representation (running the standard conversion passes) and applies the low
// precision transformations configured by `params`.
InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
    auto net1 = InferenceEngine::CNNNetwork(function);
    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
    if (clonedNetwork->getFunction()) {
        // returns true for nodes the conversion passes must keep as-is
        const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
            // DepthToSpace node implementation supports only equal input/output tensors with rank <= 5
            if (auto dtsOp = std::dynamic_pointer_cast<const ::ngraph::opset3::DepthToSpace>(node)) {
                return dtsOp->input_value(0).get_shape().size() <= 5lu && dtsOp->input_value(0).get_shape().size() == dtsOp->get_output_shape(0).size();
            }
            // SpaceToDepth node implementation supports only equal input/output tensors with rank <= 5
            if (auto stdOp = std::dynamic_pointer_cast<const ::ngraph::opset3::SpaceToDepth>(node)) {
                return stdOp->input_value(0).get_shape().size() <= 5lu && stdOp->input_value(0).get_shape().size() == stdOp->get_output_shape(0).size();
            }
            return std::dynamic_pointer_cast<const ::ngraph::opset2::Gelu>(node) ||
                std::dynamic_pointer_cast<const ::ngraph::opset2::BatchToSpace>(node) ||
                std::dynamic_pointer_cast<const ::ngraph::opset2::SpaceToBatch>(node) ||
                std::dynamic_pointer_cast<const ::ngraph::opset3::ShuffleChannels>(node);
        };
        auto nGraphFunc = clonedNetwork->getFunction();
        // Disable shape inference (WA for generic operations)
        ::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
        // Note: instead of running all Conversion Transformations you can make up your own transformation pipeline
        ngraph::pass::CommonOptimizations(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet3ToOpSet2(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet2ToOpSet1(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet1ToLegacy(transformations_callback).run_on_function(nGraphFunc);
        clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, *clonedNetwork);
    }

    auto implNetwork = std::dynamic_pointer_cast<InferenceEngine::details::CNNNetworkImpl>(clonedNetwork);
    // BUGFIX: the original guarded only ConstTransformer with `if (implNetwork)`
    // and then dereferenced *implNetwork unconditionally below; fail fast with a
    // readable error instead of a null-pointer dereference.
    if (implNetwork == nullptr) {
        THROW_IE_EXCEPTION << "CNNNetworkImpl is expected after conversion";
    }

    // valid for CNNNetworkImpl only, while there's no API in ICNNNetwork to change network
    InferenceEngine::ConstTransformer transformator(implNetwork.get());
    transformator.fullTrim();

    // legacy LPT pipeline works on I32/FP32/U8 only
    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);
    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8);

    auto transformer = getLowPrecisionTransformer(params);
    transformer.transform(*implNetwork);

    return InferenceEngine::CNNNetwork(implNetwork);
}
// Applies a caller-provided set of low precision transformations to a clone of
// the test function (legacy CNNNetwork path, no ngraph conversion passes).
InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
    InferenceEngine::details::LowPrecisionTransformer lptTransformer(transformations);
    InferenceEngine::details::CNNNetworkImplPtr clonedNetwork = cloneNet(InferenceEngine::CNNNetwork(function));
    lptTransformer.transform(*clonedNetwork);
    return InferenceEngine::CNNNetwork(clonedNetwork);
}
InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParams() {
return InferenceEngine::details::LayerTransformation::Params(
true,

View File

@@ -22,7 +22,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = {
INSTANTIATE_TEST_CASE_P(LPT, ConcatNeighboringGraphTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 1024, 16, 16 })),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(trasformationParamValues)),
ConcatNeighboringGraphTransformation::getTestCaseName);

View File

@@ -22,7 +22,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = {
INSTANTIATE_TEST_CASE_P(LPT, ConcatTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 1024, 16, 16 })),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(trasformationParamValues)),
ConcatTransformation::getTestCaseName);

View File

@@ -25,7 +25,7 @@ const std::vector<bool> multiChannelValues = { true, false };
INSTANTIATE_TEST_CASE_P(LPT, ConcatWithIntermediateTransformation,
::testing::Combine(
::testing::ValuesIn(netPrecisions),
::testing::Values(InferenceEngine::SizeVector({ 1, 1024, 16, 16 })),
::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
::testing::Values(CommonTestUtils::DEVICE_GPU),
::testing::ValuesIn(trasformationParamValues),
::testing::ValuesIn(transparentIntermediates),

View File

@@ -0,0 +1,30 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "low_precision_transformations/depth_to_space_transformation.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace InferenceEngine::details;
namespace {
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32
};

// LPT parameter sets; identifier typo fixed ("trasformation" -> "transformation")
const std::vector<LayerTransformation::Params> transformationParamValues = {
    LayerTestsUtils::LayerTransformationParamsFactory::createParams()
};

// Disabled while DepthToSpace is not supported by the GPU plugin
INSTANTIATE_TEST_CASE_P(DISABLED_LPT, DepthToSpaceTransformation,
    ::testing::Combine(
        ::testing::ValuesIn(netPrecisions),
        ::testing::Values(InferenceEngine::SizeVector({ 1, 32, 72, 48 })),
        ::testing::Values(CommonTestUtils::DEVICE_GPU),
        ::testing::ValuesIn(transformationParamValues)),
    DepthToSpaceTransformation::getTestCaseName);
}  // namespace

View File

@@ -8,16 +8,31 @@
#include <string>
#include <ie_core.hpp>
#include "generic_ie.hpp"
#include <net_pass.h>
#include "graph_transformer.h"
#include "convert_function_to_cnn_network.hpp"
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
#include <transformations/convert_opset3_to_opset2/convert_opset3_to_opset2.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset2.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/op/fused/gelu.hpp>
#include "ngraph_functions/pass/convert_prc.hpp"
#include "common_test_utils/common_utils.hpp"
#include "ie_util_internal.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ie_util_internal.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp"
#include "low_precision_transformations/transformer.hpp"
#include "low_precision_transformations/convolution.hpp"
#include "low_precision_transformations/scaleshift_to_convolution.hpp"
@@ -29,6 +44,51 @@ InferenceEngine::details::LowPrecisionTransformations LayerTransformation::getLo
return InferenceEngine::details::LowPrecisionTransformer::getAllTransformations(params);
}
// Clones the ngraph-based test function, lowers it to the legacy CNNNetwork
// representation and applies the low precision transformations configured by
// `params` (GPU flavor: only FP16 -> FP32 precision conversion is performed).
InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
    auto net1 = InferenceEngine::CNNNetwork(function);
    std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
    if (clonedNetwork->getFunction()) {
        // returns true for nodes the conversion passes must keep as-is
        const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
            return std::dynamic_pointer_cast<const ::ngraph::opset2::Gelu>(node) ||
                std::dynamic_pointer_cast<const ::ngraph::opset2::BatchToSpace>(node);
        };
        auto nGraphFunc = clonedNetwork->getFunction();
        // Disable shape inference (WA for generic operations)
        ::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
        // Note: instead of running all Conversion Transformations you can make up your own transformation pipeline
        ngraph::pass::CommonOptimizations(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet3ToOpSet2(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet2ToOpSet1(transformations_callback).run_on_function(nGraphFunc);
        ngraph::pass::ConvertOpSet1ToLegacy(transformations_callback).run_on_function(nGraphFunc);
        clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, *clonedNetwork);
    }

    auto implNetwork = std::dynamic_pointer_cast<InferenceEngine::details::CNNNetworkImpl>(clonedNetwork);
    // BUGFIX: the original guarded only ConstTransformer with `if (implNetwork)`
    // and then dereferenced *implNetwork unconditionally below; fail fast with a
    // readable error instead of a null-pointer dereference.
    if (implNetwork == nullptr) {
        THROW_IE_EXCEPTION << "CNNNetworkImpl is expected after conversion";
    }

    // valid for CNNNetworkImpl only, while there's no API in ICNNNetwork to change network
    InferenceEngine::ConstTransformer transformator(implNetwork.get());
    transformator.fullTrim();

    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);

    auto transformer = getLowPrecisionTransformer(params);
    transformer.transform(*implNetwork);

    return InferenceEngine::CNNNetwork(implNetwork);
}
// Applies a caller-provided set of low precision transformations to a clone of
// the test function (legacy CNNNetwork path, no ngraph conversion passes).
InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
    InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
    InferenceEngine::details::LowPrecisionTransformer transformer(transformations);
    transformer.transform(*cnnNetworkImp);
    return InferenceEngine::CNNNetwork(cnnNetworkImp);
}
InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParams() {
return InferenceEngine::details::LayerTransformation::Params(
true,

View File

@@ -0,0 +1,27 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <string>
#include <memory>
#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp"
namespace LayerTestsDefinitions {
// Functional test fixture for the LPT DepthToSpace transformation: SetUp builds
// an ngraph function with a FakeQuantize followed by the reshape/transpose/reshape
// pattern that DepthToSpaceFusion folds into a DepthToSpace node.
class DepthToSpaceTransformation :
    public testing::WithParamInterface<LayerTestsUtils::LayerTransformationParams>,
    public LayerTestsUtils::LayerTransformation {
public:
    static std::string getTestCaseName(testing::TestParamInfo<LayerTestsUtils::LayerTransformationParams> obj);

protected:
    void SetUp() override;

private:
    // Verifies the transformed CNNNetwork topology and output precisions.
    void validate();
};
} // namespace LayerTestsDefinitions

View File

@@ -0,0 +1,119 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "low_precision_transformations/depth_to_space_transformation.hpp"
#include <memory>
#include <tuple>
#include <vector>
#include <string>
#include <ie_core.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ngraph_functions/builders.hpp"
#include <ngraph/function.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph_ops/fully_connected.hpp>
#include <transformations/utils/utils.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/depth_to_space_fusion.hpp>
#include <ngraph/op/fused/depth_to_space.hpp>
namespace LayerTestsDefinitions {
// Produces a unique, human-readable name for each test parameterization.
// FIX: the original extracted inputShapes from the tuple but never used it,
// so parameterizations differing only by shape got identical names.
std::string DepthToSpaceTransformation::getTestCaseName(testing::TestParamInfo<LayerTestsUtils::LayerTransformationParams> obj) {
    InferenceEngine::Precision netPrecision;
    InferenceEngine::SizeVector inputShapes;
    std::string targetDevice;
    InferenceEngine::details::LayerTransformation::Params params;
    std::tie(netPrecision, inputShapes, targetDevice, params) = obj.param;

    std::ostringstream result;
    result << netPrecision.name() << "_";
    // shape rendered as e.g. "1x32x72x48" (only alphanumerics/underscore are safe in gtest names)
    for (size_t i = 0; i < inputShapes.size(); ++i) {
        result << inputShapes[i] << ((i + 1 == inputShapes.size()) ? "_" : "x");
    }
    result << targetDevice << "_" << toString(params);
    return result.str();
}
// Builds Input -> FakeQuantize -> Reshape -> Transpose -> Reshape, the pattern
// that DepthToSpaceFusion folds into a single DepthToSpace node (block size 2).
void DepthToSpaceTransformation::SetUp() {
    InferenceEngine::SizeVector inputShape;
    InferenceEngine::Precision netPrecision;
    InferenceEngine::details::LayerTransformation::Params params;
    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();

    // the reshape pattern below is hard-coded for 4D (NCHW) inputs
    if (inputShape.size() != 4ul) {
        THROW_IE_EXCEPTION << "not supported input shape size " << inputShape.size();
    }

    auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    const auto input = std::make_shared<ngraph::opset1::Parameter>(ngPrecision, ngraph::Shape(inputShape));
    // per-tensor FakeQuantize (256 levels) so LPT can quantize the pattern
    const auto fakeQuantize = ngraph::builder::makeFakeQuantize(input, ngPrecision, 256ul, { 1ul });

    // split C into (C/4, 2, 2); assumes inputShape[1] is divisible by 4 -- TODO confirm if new shapes are added
    const auto shapeReshapeBefore = ngraph::opset1::Constant::create(
        ngraph::element::i64,
        ngraph::Shape{ 6ul },
        ngraph::Shape{ inputShape[0], inputShape[1] / 4ul, 2ul, 2ul, inputShape[2], inputShape[3] });
    const auto reshapeBefore = std::make_shared<ngraph::opset1::Reshape>(fakeQuantize, shapeReshapeBefore, false);

    // interleave the 2x2 block dimensions with the spatial dimensions
    const auto permutation = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 6 }, { 0, 1, 4, 2, 5, 3 });
    const auto permute = std::make_shared<ngraph::opset1::Transpose>(reshapeBefore, permutation);

    // collapse back to 4D: H and W doubled, channel count divided by 4
    const auto shapeReshapeAfter = ngraph::opset1::Constant::create(
        ngraph::element::i64,
        ngraph::Shape{ 4 },
        ngraph::Shape{ 1, inputShape[1] / 4ul, inputShape[2] * 2, inputShape[3] * 2 });
    const auto reshapeAfter = std::make_shared<ngraph::opset1::Reshape>(permute, shapeReshapeAfter, false);

    function = std::make_shared<ngraph::Function>(ngraph::NodeVector{ reshapeAfter }, ngraph::ParameterVector{ input });

    ngraph::pass::InitNodeInfo().run_on_function(function);
    // fuse the pattern into a DepthToSpace node before the plugin sees it
    ngraph::pass::DepthToSpaceFusion().run_on_function(function);

    // TODO: move to some another place
    validate();
}
// Checks the transformed network topology: the single output must be a
// ScaleShift (the dequantization moved after DepthToSpace), fed by the
// DepthToSpace layer; with updatePrecisions, DepthToSpace must output U8/I8.
void DepthToSpaceTransformation::validate() {
    InferenceEngine::SizeVector inputShape;
    InferenceEngine::Precision netPrecision;
    InferenceEngine::details::LayerTransformation::Params params;
    std::tie(netPrecision, inputShape, targetDevice, params) = this->GetParam();

    const InferenceEngine::CNNNetwork network = transform(params);

    IE_SUPPRESS_DEPRECATED_START

    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
    EXPECT_EQ(1, outputs.size());

    std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
    const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
    EXPECT_TRUE(outputLayer != nullptr);
    // the dequantization ScaleShift is expected to end up at the network output
    EXPECT_EQ("ScaleShift", outputLayer->type);

    EXPECT_EQ(1ul, outputLayer->insData.size());
    const InferenceEngine::DataPtr insData = outputLayer->insData[0].lock();
    EXPECT_TRUE(insData != nullptr);
    const InferenceEngine::CNNLayerPtr depthToSpace = insData->getCreatorLayer().lock();
    EXPECT_TRUE(depthToSpace != nullptr);
    EXPECT_EQ("DepthToSpace", depthToSpace->type);

    if (params.updatePrecisions) {
        // after the transformation DepthToSpace should run in low precision
        const InferenceEngine::Precision precision = depthToSpace->outData[0]->getTensorDesc().getPrecision();
        EXPECT_TRUE((precision == InferenceEngine::Precision::U8) || (precision == InferenceEngine::Precision::I8));
    }

    IE_SUPPRESS_DEPRECATED_END
}
// Runs the standard compare-with-reference flow for each parameterization.
// Dropped the stray ';' after the macro body (triggers -Wextra-semi warnings).
TEST_P(DepthToSpaceTransformation, CompareWithRefImpl) {
    Run();
}
} // namespace LayerTestsDefinitions

View File

@@ -18,6 +18,9 @@ addIeTarget(
EXPORT_DEPENDENCIES ${EXPORT_DEPENDENCIES}
)
target_include_directories(${TARGET_NAME} PUBLIC $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)
target_include_directories(${TARGET_NAME} PUBLIC
$<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_transformations,INTERFACE_INCLUDE_DIRECTORIES>
)
target_link_libraries(${TARGET_NAME} PUBLIC ${EXPORT_DEPENDENCIES})
target_link_libraries(${TARGET_NAME} PUBLIC ${EXPORT_DEPENDENCIES} inference_engine_transformations)

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "layer_transformation.hpp"
#include <memory>
#include <tuple>
#include <vector>
@@ -9,6 +11,7 @@
#include <unordered_set>
#include <ie_core.hpp>
#include <net_pass.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
@@ -18,11 +21,9 @@
#include "ngraph_functions/pass/convert_prc.hpp"
#include "ie_util_internal.hpp"
#include "functional_test_utils/low_precision_transformations/layer_transformation.hpp"
#include "low_precision_transformations/convolution.hpp"
#include "low_precision_transformations/scaleshift_to_convolution.hpp"
namespace LayerTestsUtils {
InferenceEngine::details::LayerTransformation::Params LayerTransformationParamsFactory::createParamsU8I8() {
@@ -73,24 +74,6 @@ InferenceEngine::details::LowPrecisionTransformer LayerTransformation::getLowPre
return transformer;
}
InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
auto transformer = getLowPrecisionTransformer(params);
transformer.transform(*cnnNetworkImp);
return InferenceEngine::CNNNetwork(cnnNetworkImp);
}
InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
InferenceEngine::details::LowPrecisionTransformer transformer(transformations);
transformer.transform(*cnnNetworkImp);
return InferenceEngine::CNNNetwork(cnnNetworkImp);
}
void LayerTransformation::checkParentPrecision(const InferenceEngine::CNNLayerPtr& layer, const bool lowPrecision) {
EXPECT_EQ(1ul, layer->insData.size()) << "insert data count is no expected: " << layer->insData.size();
const InferenceEngine::DataPtr insData = layer->insData[0].lock();

View File

@@ -4,12 +4,16 @@
#pragma once
#include "functional_test_utils/layer_test_utils.hpp"
#include "low_precision_transformations/transformer.hpp"
#include <memory>
#include <string>
#include <sstream>
#include <tuple>
#include "ie_util_internal.hpp"
#include "low_precision_transformations/convolution.hpp"
#include "low_precision_transformations/scaleshift_to_convolution.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
#include "low_precision_transformations/transformer.hpp"
namespace LayerTestsUtils {