Removed CNNNetwork BlobTransformer (#1709)

* Removed CNNNetwork BlobTransformer

* Removed inference_engine_lp_transformations dependency for GNA and VPU plugins
This commit is contained in:
Gleb Kazantaev
2020-08-11 12:14:14 +03:00
committed by GitHub
parent 8c122f4ea0
commit 10d1cd3162
8 changed files with 2 additions and 124 deletions

View File

@@ -31,7 +31,7 @@ endif()
#saving rpath to GNA shared library be used by CI
log_rpath_from_dir(GNA ${libGNA_LIBRARIES_BASE_PATH})
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine inference_engine_lp_transformations Threads::Threads libGNA)
target_link_libraries(${TARGET_NAME} PRIVATE inference_engine Threads::Threads libGNA)
target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_compile_definitions(${TARGET_NAME}
PRIVATE

View File

@@ -18,7 +18,6 @@
#include <utility>
#include <limits>
#include <low_precision_transformations/blob_transformation.hpp>
#include <legacy/graph_tools.hpp>
#include <legacy/net_pass.h>
#include <debug.h>
@@ -349,10 +348,6 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
NetPass::ConvertPrecision(network, Precision::U64, Precision::I32);
NetPass::ConvertPrecision(network, Precision::U32, Precision::I32);
// move blobs from Constant layers to Convolution, Deconvolution, FullyConnected layers attributes
BlobTransformation blobsTransformation;
blobsTransformation.transform(network, true);
// Check the input network
std::string error;
if (!AreLayersSupported(network, error)) {

View File

@@ -1,30 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// NOTE(review): this is the full content of a header file DELETED by this
// commit ("Removed CNNNetwork BlobTransformer"); shown here as diff context.
#pragma once
#include <algorithm>
#include <string>
#include <unordered_set>
#include <ie_icnn_network.hpp>
namespace InferenceEngine {
namespace details {
// Pass that moves weight/bias data from Constant producer layers into the
// blob attributes of consuming layers (the types listed in
// layersForTransformations), removing the Constant layers afterwards.
class INFERENCE_ENGINE_API_CLASS(BlobTransformation) {
public:
BlobTransformation() = default;
// Run the pass over the whole network.
// transformWithFakeQuantizeOnWeights: when false, layers whose weights
// input comes from a FakeQuantize/Quantize layer are skipped (see the
// matching check in the .cpp implementation below in this commit).
void transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights = false) const;
private:
// Only these weightable layer types have their Constant inputs folded.
const std::unordered_set<std::string> layersForTransformations = {
"Convolution",
"Deconvolution",
"FullyConnected"
};
};
} // namespace details
} // namespace InferenceEngine

View File

@@ -1,57 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// NOTE(review): this is the full content of a source file DELETED by this
// commit ("Removed CNNNetwork BlobTransformer"); shown here as diff context.
#include "low_precision_transformations/blob_transformation.hpp"
#include "low_precision_transformations/network_helper.hpp"
#include <legacy/details/ie_cnn_network_tools.h>
#include <algorithm>
#include <vector>
using namespace InferenceEngine;
using namespace InferenceEngine::details;
// Walks the network in topological order and, for each Convolution /
// Deconvolution / FullyConnected layer, folds its Constant weight (input 1)
// and bias (input 2) producers into the layer's own blob attributes, then
// removes those producer layers from the network.
void BlobTransformation::transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights) const {
const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
for (const CNNLayerPtr& layer : layers) {
// Need at least a data input plus a weights input.
if (layer->insData.size() < 2) {
continue;
}
// Only the layer types registered in layersForTransformations are handled.
if (this->layersForTransformations.find(layer->type) == this->layersForTransformations.end()) {
continue;
}
// Input index 1 is the weights producer.
const CNNLayerPtr weightsLayer = CNNNetworkHelper::getParent(*layer, 1);
// Unless explicitly requested, skip layers whose weights are produced by
// a (Fake)Quantize layer — folding would bypass the quantization op.
if ((!transformWithFakeQuantizeOnWeights) &&
((weightsLayer->type == "FakeQuantize") || (weightsLayer->type == "Quantize"))) {
continue;
}
// Blobs can only be attached to WeightableLayer instances.
WeightableLayer* weightableLayer = dynamic_cast<WeightableLayer*>(layer.get());
if (weightableLayer == nullptr) {
continue;
}
// Copy the weights blob into both the generic blobs map and the typed field.
const Blob::Ptr weightsBlob = CNNNetworkHelper::getWeights(*layer, false);
if (weightsBlob != nullptr) {
weightableLayer->blobs["weights"] = weightsBlob;
weightableLayer->_weights = weightsBlob;
}
// Input index 2, when present, is the biases producer; fold and remove it too.
if (layer->insData.size() >= 3) {
const Blob::Ptr biasesBlob = CNNNetworkHelper::getBiases(*layer);
if (biasesBlob != nullptr) {
weightableLayer->blobs["biases"] = biasesBlob;
weightableLayer->_biases = biasesBlob;
}
CNNLayerPtr biasesLayer = CNNNetworkHelper::getParent(*layer, 2);
CNNNetworkHelper::removeLayer(network, biasesLayer);
}
// The Constant weights producer is no longer needed once folded.
// NOTE(review): removal is unconditional here even when weightsBlob was
// null — presumably getWeights only fails for non-Constant producers;
// behavior preserved as recorded in the commit.
CNNNetworkHelper::removeLayer(network, weightsLayer);
}
}

View File

@@ -45,7 +45,7 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE)
target_link_libraries(${TARGET_NAME} PUBLIC pugixml vpu_common_lib)
endif()
target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} inference_engine_lp_transformations
target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES}
PRIVATE openvino::itt)
if(WIN32)

View File

@@ -66,9 +66,6 @@ private:
void removeConstLayers(
ie::ICNNNetwork& network);
void moveConstInputsToBlobs(
ie::ICNNNetwork& network);
//
// Process internal VPU Model
//

View File

@@ -426,8 +426,6 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa
ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U64, ie::Precision::I32);
ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::BOOL, ie::Precision::I32);
moveConstInputsToBlobs(*originalOrConvertNetwork);
removeConstLayers(*originalOrConvertNetwork);
unrollLoops(*originalOrConvertNetwork);

View File

@@ -1,25 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// NOTE(review): this is the full content of a VPU source file DELETED by this
// commit, along with its call site in FrontEnd::runCommonPasses.
#include <vpu/frontend/frontend.hpp>
#include <low_precision_transformations/blob_transformation.hpp>
#include <vpu/compile_env.hpp>
namespace vpu {
// Folds Constant weight/bias inputs of weightable layers into layer blob
// attributes by delegating to the IE BlobTransformation pass; the `true`
// argument enables the transform even when weights come through a
// FakeQuantize layer (per the pass's flag semantics).
void FrontEnd::moveConstInputsToBlobs(ie::ICNNNetwork& network) {
VPU_PROFILE(moveConstInputsToBlobs);
const auto& env = CompileEnv::get();
env.log->trace("Move const inputs to blobs");
VPU_LOGGER_SECTION(env.log);
ie::details::BlobTransformation blobsTransformation;
blobsTransformation.transform(network, true);
}
} // namespace vpu
} // namespace vpu