Deprecated CNN layer (#1138)

* Deprecated getInputTo, getCreatorLayer

* Fixes

* Fixed ie_layers moving to legacy

* Fixed ONNX importer dependency

* Fixed Python

* Fixed Python API compilation

* Added comments warning not to use _impl from Data

Co-authored-by: Nadezhda Ageeva <nadezhda.ageeva@intel.com>
Ilya Lavrenov 2020-07-03 20:57:28 +03:00 committed by GitHub
parent 6365fcb6f5
commit 4f0225014d
100 changed files with 641 additions and 573 deletions
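For reviewers, a minimal migration sketch of what the deprecation means at call sites (the function and variable names below are illustrative, not taken from any file in this PR): code moves from the deprecated `Data` member functions to the free functions now declared in `ie_layers.h`. The Python `creator_layer` and `input_to` properties are reworked to call the same free functions through the Cython externs.

```cpp
// Illustrative sketch only; "visitNeighbours" and "data" are made-up names.
#include <ie_layers.h>

void visitNeighbours(const InferenceEngine::DataPtr& data) {
    using namespace InferenceEngine;

    // before: CNNLayerPtr parent = data->getCreatorLayer().lock();
    CNNLayerPtr parent = getCreatorLayer(data).lock();

    // before: for (auto& child : data->getInputTo()) { ... }
    for (auto& child : getInputTo(data)) {
        (void)child;  // child.first: consumer layer name, child.second: CNNLayerPtr
    }
    (void)parent;
}
```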

View File

@ -15,7 +15,9 @@ if (ENABLE_SANITIZER)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
endif()
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")

View File

@ -717,7 +717,7 @@ cdef class DataPtr:
@property
def creator_layer(self):
cdef C.CNNLayerWeakPtr _l_ptr = deref(self._ptr).getCreatorLayer()
cdef C.CNNLayerWeakPtr _l_ptr = C.getCreatorLayer(self._ptr)
cdef IENetLayer creator_layer
creator_layer = IENetLayer()
if _l_ptr.lock() != NULL:
@ -728,7 +728,7 @@ cdef class DataPtr:
@property
def input_to(self):
cdef map[string, C.CNNLayerPtr] _l_ptr_map = deref(self._ptr).getInputTo()
cdef map[string, C.CNNLayerPtr] _l_ptr_map = C.getInputTo(self._ptr)
cdef IENetLayer input_to
input_to_list = []
for layer in _l_ptr_map:
@ -760,28 +760,6 @@ cdef class CDataPtr:
def initialized(self):
return deref(self._ptr).isInitialized()
# TODO: Resolve compilation error
# @property
# def creator_layer(self):
# cdef C.CNNLayerWeakPtr _l_ptr = deref(self._ptr).getCreatorLayer()
# cdef IENetLayer creator_layer
# creator_layer = IENetLayer()
# if _l_ptr.lock() != NULL:
# creator_layer._ptr = _l_ptr.lock()
# else:
# raise RuntimeError("Creator IENetLayer of DataPtr object with name {} already released!".format(self.name))
# return creator_layer
# @property
# def input_to(self):
# cdef map[string, C.CNNLayerPtr] _l_ptr_map = deref(self._ptr).getInputTo()
# cdef IENetLayer input_to
# input_to_list = []
# for layer in _l_ptr_map:
# input_to = IENetLayer()
# input_to._ptr = layer.second
# input_to_list.append(input_to)
# return input_to_list
## This class represents a network instance loaded to plugin and ready for inference.
cdef class ExecutableNetwork:
@ -1293,6 +1271,7 @@ cdef class IENetLayer:
@params.setter
def params(self, new_params):
deref(self._ptr).params = dict_to_c_map(new_params)
## Returns a list, which contains names of layers preceding this layer
@property
def parents(self):
@ -1312,11 +1291,10 @@ cdef class IENetLayer:
cdef map[string, C.CNNLayerPtr] _l_ptr_map
input_to_list = []
for l in c_outs:
_l_ptr_map = deref(l).getInputTo()
_l_ptr_map = C.getInputTo(l)
for layer in _l_ptr_map:
input_to_list.append(deref(layer.second).name.decode())
return input_to_list
## \note This property is deprecated.
# Please, use out_data property to access DataPtr objects for all output ports, which contains full
# information about layer's output data including layout

View File

@ -22,7 +22,8 @@
#include <mutex>
#include <ie_extension.h>
#include "inference_engine.hpp"
#include <ie_layers.h>
#include <ie_core.hpp>
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;

View File

@ -44,8 +44,6 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
const Layout getLayout() except +
void setLayout(Layout layout) except +
const bool isInitialized() except +
weak_ptr[CNNLayer] & getCreatorLayer() except +
map[string, shared_ptr[CNNLayer]] & getInputTo() except +
ctypedef shared_ptr[Data] DataPtr
ctypedef weak_ptr[Data] DataWeakPtr
@ -143,6 +141,11 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
CN
BLOCKED
cdef extern from "<ie_layers.h>" namespace "InferenceEngine":
cdef weak_ptr[CNNLayer] getCreatorLayer(const shared_ptr[Data] & data)
map[string, shared_ptr[CNNLayer]] & getInputTo(const shared_ptr[Data] & data)
cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
cdef cppclass ProfileInfo:

View File

@ -40,3 +40,21 @@ def test_layout():
def test_initialized():
assert layer_out_data().initialized, "Incorrect value for initialized property for layer '19/Fused_Add_'"
def test_input_to():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
input_to = net.layers['26'].out_data[0].input_to
assert len(input_to) == 1
assert input_to[0].name == '27'
def test_creator_layer():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
outputs = net.outputs
assert len(outputs) == 1
creator_layer = outputs['fc_out'].creator_layer
params = creator_layer.params
assert params['originalLayersNames'] == 'fc_out'
assert params['axis'] == '1'

View File

@ -93,19 +93,6 @@ def test_params_setter():
"strides" : "2,2", "pool-method" : "max",
"originalLayersNames" : "27", 'PrimitivesPriority': 'cpu:ref_any'}
def test_layer_parents():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.layers['27'].parents == ['26']
def test_layer_children():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.layers['27'].children == ['29']
def test_layout(recwarn):
warnings.simplefilter("always")
ie = IECore()
@ -133,3 +120,17 @@ def test_in_data():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert isinstance(net.layers['27'].in_data[0], DataPtr)
def test_parents():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
parents = net.layers['27'].parents
assert len(parents) == 1
assert(parents[0] == '26')
def test_children():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
children = net.layers['26'].children
assert len(children) == 1
assert(children[0] == '27')

View File

@ -9,8 +9,6 @@
*/
#pragma once
#include "details/ie_no_copy.hpp"
#if defined(USE_STATIC_IE) || (defined(__GNUC__) && (__GNUC__ < 4))
# define INFERENCE_ENGINE_API(...) extern "C" __VA_ARGS__
# define INFERENCE_ENGINE_API_CPP(...) __VA_ARGS__
@ -52,14 +50,6 @@
# define INFERENCE_ENGINE_INTERNAL(msg) INFERENCE_ENGINE_DEPRECATED(msg)
#endif
#if defined IMPLEMENT_INFERENCE_ENGINE_API || defined IMPLEMENT_INFERENCE_ENGINE_PLUGIN
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#else
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1") \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif
// Suppress warning "-Wdeprecated-declarations" / C4996
#if defined(_MSC_VER)
# define IE_DO_PRAGMA(x) __pragma(x)

View File

@ -28,6 +28,7 @@ namespace InferenceEngine {
*/
class INFERENCE_ENGINE_API_CLASS(Data) {
class Impl;
public:
/**
* @brief An empty constructor (dimensionless)
@ -46,6 +47,21 @@ public:
*/
Data(const std::string& name, const TensorDesc& desc);
/**
* @brief A copy constructor
*
* @param data A data object to copy from
*/
Data(const Data& data);
/**
* @brief An assignment operator
*
* @param data A data object to copy from
* @return An assigned object
*/
Data & operator = (const Data& data);
/**
* @brief A virtual destructor
*/
@ -113,14 +129,6 @@ public:
*/
const SizeVector& getDims() const;
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Returns an owner of this data layer, parent layer in di-graph
* @return A weak pointer to CNNLayer that creates this data
*/
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly")
virtual CNNLayerWeakPtr& getCreatorLayer();
/**
* @return name of the data object
*/
@ -134,36 +142,23 @@ public:
void setName(const std::string& newName);
/**
* @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1
* @brief Provides child layers in di-graph
* @return A map of child layers
*/
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly")
virtual std::map<std::string, CNNLayerPtr>& getInputTo();
/**
* @return convenient arbitrary user data holder
*/
const UserValue& getUserObject() const;
private:
/**
* @brief A pointer to the layer that creates this data element, null for input data elements
* @private
* @brief Don't touch this field. An implementation detail of the Data object.
*/
CNNLayerWeakPtr creatorLayer;
std::shared_ptr<Impl> _impl;
private:
/**
* @brief A unique name that identifies this data node
*/
std::string name;
/**
* @brief A map of layers that use this node as input.
* It is useful for recursive NN graph traversal.
*/
std::map<std::string, CNNLayerPtr> inputTo;
/**
* @brief A user utility place holder
*/

View File

@ -19,7 +19,6 @@
#include "ie_data.h"
#include "ie_iextension.h"
#include "ie_input_info.hpp"
#include "ie_layers.h"
#include "ie_preprocess.hpp"
namespace ngraph {

View File

@ -19,7 +19,6 @@
#include "ie_common.h"
#include "ie_layouts.h"
#include "ie_blob.h"
#include "ie_layers.h"
#include "ie_version.hpp"
/**

View File

@ -10,7 +10,6 @@
#include <ie_api.h>
#include <ie_blob.h>
#include <ie_layers.h>
#include <cpp/ie_executable_network.hpp>
#include <ie_core.hpp>
#include <ie_icnn_network.hpp>

View File

@ -290,7 +290,7 @@ void clDNNEngine::QueryNetwork(const ICNNNetwork& network, const std::map<std::s
// take all parents.
bool supported = true;
for (DataWeakPtr insData : concat->insData) {
CNNLayerPtr prev = insData.lock()->getCreatorLayer().lock();
CNNLayerPtr prev = getCreatorLayer(insData.lock()).lock();
// verify if previous layer is not supported or if it in the list of not defined layers yet
// not defined layers are treated as layers which will be assigned to GPU if next layer is assigned to GPU
if (res.supportedLayersMap.find(prev->name) == res.supportedLayersMap.end()
@ -310,7 +310,7 @@ void clDNNEngine::QueryNetwork(const ICNNNetwork& network, const std::map<std::s
cnl++) {
bool supported = true;
for (DataPtr out : (*cnl)->outData) {
for (auto ol : out->getInputTo()) {
for (auto ol : getInputTo(out)) {
if (res.supportedLayersMap.find(ol.second->name) == res.supportedLayersMap.end()) {
supported = false;
}

View File

@ -414,7 +414,7 @@ InferenceEngine::ICNNNetwork::Ptr CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(s
std::string data_name = pi.original_id + "_out" + std::to_string(i);
layer->outData[i] = std::make_shared<Data>(data_name, desc_from_layout(pi.output_layout));
data = layer->outData[i];
data->getCreatorLayer() = layer;
getCreatorLayer(data) = layer;
} else {
data = layer->outData[0];
}
@ -431,7 +431,7 @@ InferenceEngine::ICNNNetwork::Ptr CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(s
}
if (dep == pi.original_id && child_layer->insData[in_port_id].lock() == nullptr) {
data->getInputTo()[child_layer->name] = child_layer;
getInputTo(data)[child_layer->name] = child_layer;
child_layer->insData[in_port_id] = data;
break;
}

View File

@ -180,7 +180,7 @@ bool Program::CanProcessDynBatch(InferenceEngine::ICNNNetwork &network) const {
if (inputs.empty())
return false;
auto & secondLayers = inputs.begin()->second->getInputData()->getInputTo();
auto & secondLayers = getInputTo(inputs.begin()->second->getInputData());
if (secondLayers.empty())
return false;
@ -343,7 +343,7 @@ std::vector<InferenceEngine::CNNLayerPtr> Program::GetNextLayers(const Inference
if (data == nullptr) {
return nextLayers;
}
for (auto nl : data->getInputTo()) {
for (auto nl : getInputTo(data)) {
nextLayers.push_back(nl.second);
}
return nextLayers;
@ -913,7 +913,7 @@ void Program::CreateWeightAndBiasPrimitives(cldnn::topology& topology,
}
if (pWeightsBlob == nullptr) {
auto wei_name = layer_type_name_ID(layer->insData[inputs_count].lock()->getCreatorLayer().lock());
auto wei_name = layer_type_name_ID(getCreatorLayer(layer->insData[inputs_count].lock()).lock());
if (primitiveIDs.find(wei_name) != primitiveIDs.end()) {
weightsPrimID.push_back(primitiveIDs.at(wei_name));
} else {
@ -947,7 +947,7 @@ void Program::CreateWeightAndBiasPrimitives(cldnn::topology& topology,
biasesLayout);
biasesPrimID.push_back(biasID);
} else if (layer->insData.size() == inputs_count + 2) {
auto bias_name = layer_type_name_ID(layer->insData[inputs_count + 1].lock()->getCreatorLayer().lock());
auto bias_name = layer_type_name_ID(getCreatorLayer(layer->insData[inputs_count + 1].lock()).lock());
if (primitiveIDs.find(bias_name) != primitiveIDs.end()) {
biasesPrimID.push_back(primitiveIDs.at(bias_name));
} else {
@ -1000,7 +1000,7 @@ void Program::CreateBinaryWeightAndBiasPrimitives(cldnn::topology& topology,
// create weights primitive
if (pWeightsBlob == nullptr) {
auto wei_name = layer_type_name_ID(layer->insData[1].lock()->getCreatorLayer().lock());
auto wei_name = layer_type_name_ID(getCreatorLayer(layer->insData[1].lock()).lock());
weightsPrimID.push_back(wei_name);
} else {
cldnn::layout weightsLayout = cldnn::layout(
@ -2635,7 +2635,7 @@ void Program::CreatePoolingPrimitive(cldnn::topology& topology, InferenceEngine:
int outputOrder = 0;
for (auto out : poolLayer->outData) {
auto layersMap = out->getInputTo();
auto layersMap = getInputTo(out);
for (auto item : layersMap) {
bool isUpooling = (LayerTypeFromStr(item.second->type) == Unpooling);
@ -3172,7 +3172,7 @@ void Program::CreateTopKPrimitive(cldnn::topology& topology, InferenceEngine::CN
stype = cldnn::arg_max_min::sort_type::sort_by_indices;
auto topKInput = layer->insData[1].lock();
auto topKInputCreator = topKInput->getCreatorLayer().lock();
auto topKInputCreator = getCreatorLayer(topKInput).lock();
std::vector<int32_t> topk;
if (topKInputCreator->blobs.size() == 1) {
@ -3310,7 +3310,7 @@ void Program::CreateMaxUnpoolingPrimitive(cldnn::topology& topology, InferenceEn
THROW_CLDNN_EXCEPTION("MaxUnpooling: nonexistent input for layer: " << layer->name);
}
auto prevCreator = prevData->getCreatorLayer().lock();
auto prevCreator = getCreatorLayer(prevData).lock();
if (prevCreator &&
(LayerTypeFromStr(prevCreator->type) == Pooling) &&
@ -3896,7 +3896,7 @@ void Program::CreateSpaceToBatchPrimitive(cldnn::topology& topology, InferenceEn
for (size_t i = 1; i < 4; ++i) {
auto defaultIndexInput = layer->insData[i].lock();
auto defaultIndexInputCreator = defaultIndexInput->getCreatorLayer().lock();
auto defaultIndexInputCreator = getCreatorLayer(defaultIndexInput).lock();
if (defaultIndexInputCreator->blobs.size() == 1) {
auto constantBlob = defaultIndexInputCreator->blobs.begin()->second;
auto defaultIndexPrecision = constantBlob->getTensorDesc().getPrecision();
@ -4166,7 +4166,7 @@ void Program::CreateReducePrimitive(cldnn::topology& topology, InferenceEngine::
size_t reduceDimNumber = input->getTensorDesc().getDims().size();
auto axesInput = layer->insData[1].lock();
auto axesInputCreator = axesInput->getCreatorLayer().lock();
auto axesInputCreator = getCreatorLayer(axesInput).lock();
std::vector<int32_t> rawAxes;
if (axesInputCreator->blobs.size() == 1) {
@ -4580,7 +4580,7 @@ void Program::CreateCumSumPrimitive(cldnn::topology& topology, InferenceEngine::
int32_t axis = 0;
if (inputPrimitives.size() == 2) {
auto axesInput = layer->insData[1].lock();
auto axesInputCreator = axesInput->getCreatorLayer().lock();
auto axesInputCreator = getCreatorLayer(axesInput).lock();
if (axesInputCreator->blobs.size() == 1) {
auto constantBlob = axesInputCreator->blobs.begin()->second;
auto axesPrecision = constantBlob->getTensorDesc().getPrecision();
@ -4739,7 +4739,7 @@ void Program::CreateEmbeddingBagOffsetsSumPrimitive(cldnn::topology& topology, I
int32_t defaultIndex = -1;
if (inputPrimitives.size() > 3) {
auto defaultIndexInput = layer->insData[3].lock();
auto defaultIndexInputCreator = defaultIndexInput->getCreatorLayer().lock();
auto defaultIndexInputCreator = getCreatorLayer(defaultIndexInput).lock();
if (defaultIndexInputCreator->blobs.size() == 1) {
auto constantBlob = defaultIndexInputCreator->blobs.begin()->second;
auto defaultIndexPrecision = constantBlob->getTensorDesc().getPrecision();
@ -4802,7 +4802,7 @@ void Program::CreateEmbeddingSegmentsSumPrimitive(cldnn::topology& topology, Inf
int32_t defaultIndex = -1;
if (inputPrimitives.size() > 3) {
auto defaultIndexInput = layer->insData[4].lock();
auto defaultIndexInputCreator = defaultIndexInput->getCreatorLayer().lock();
auto defaultIndexInputCreator = getCreatorLayer(defaultIndexInput).lock();
if (defaultIndexInputCreator->blobs.size() == 1) {
auto constantBlob = defaultIndexInputCreator->blobs.begin()->second;
auto defaultIndexPrecision = constantBlob->getTensorDesc().getPrecision();
@ -4884,7 +4884,7 @@ bool Program::IsValidSplitConvMerge(const InferenceEngine::SplitLayer *splitLaye
if (splitLayer->outData.size() != 2) return false; // split into 2
for (auto out : splitLayer->outData) {
if (out->getInputTo().size() != 1) {
if (getInputTo(out).size() != 1) {
return false;
}
}
@ -4935,7 +4935,7 @@ void Program::AddInputPrimitive(cldnn::topology& topology, InputInfo::Ptr inputI
const auto inputDims = inputDesc.getDims();
Layout l = inputDesc.getLayout();
Precision ip = inputDesc.getPrecision();
auto consumers = inputInfo->getInputData()->getInputTo();
auto consumers = getInputTo(inputInfo->getInputData());
cldnn::format inputFormat = m_defaultFormat;
if (InferenceEngine::Layout::BLOCKED == l && 6 == inputDims.size())
@ -5155,7 +5155,7 @@ std::vector<cldnn::primitive_id> Program::GetPrevLayersPrimitives(const Inferenc
if (prevData == nullptr) {
THROW_CLDNN_EXCEPTION("Nonexistent input for layer: " << layer->name);
}
auto prevCreator = prevData->getCreatorLayer().lock();
auto prevCreator = getCreatorLayer(prevData).lock();
std::string prevName;
if (prevCreator) {
@ -5189,7 +5189,7 @@ void Program::AddOutputPrimitive(cldnn::topology& topology, std::string outputNa
THROW_CLDNN_EXCEPTION("Unsupported layout (" << outputlayout << ") in output: " << outputName);
}
auto outputCreator = outputData->getCreatorLayer().lock();
auto outputCreator = getCreatorLayer(outputData).lock();
std::string outLayerName = layer_type_lower(outputCreator) + ":";
if (outputCreator->outData.size() > 1)

View File

@ -13,6 +13,7 @@
#include <algorithm>
#include <cpp/ie_cnn_network.h>
#include <ie_layers.h>
#include <cpp_interfaces/exception2status.hpp>
#include <ie_blob.h>

View File

@ -74,7 +74,7 @@ class ModelQuantizer {
copiedNet->getInputsInfo(dm);
int scaleIndex = 0;
for (auto &&inputData : dm) {
auto inputLayer = inputData.second->getInputData()->getCreatorLayer().lock();
auto inputLayer = getCreatorLayer(inputData.second->getInputData()).lock();
auto quantData = InferenceEngine::getInjectedData<QuantizedLayerParams>(inputLayer);
if (scaleFactor.size() <= scaleIndex) {
THROW_GNA_EXCEPTION << "Scale factors are not set for some of the inputs";

View File

@ -26,7 +26,7 @@ class FuzedLayersIterator {
explicit FuzedLayersIterator(InferenceEngine::CNNLayer* origin) {
bool hasActivation = false;
for (auto && data : origin->outData) {
auto & inputTo = data->getInputTo();
auto & inputTo = getInputTo(data);
for (auto i = inputTo.begin(); i != inputTo.end(); i++) {
LayerInfo info(i->second);
if (info.isActivation()) {

View File

@ -70,7 +70,7 @@ intel_dnn_component_t * GNAGraphCompiler::find_first_unused_input(InferenceEngin
if (inData == nullptr)
return nullptr;
auto prev_layer = inData->getCreatorLayer().lock();
auto prev_layer = getCreatorLayer(inData).lock();
return dnnComponents.findComponent(prev_layer);
}
@ -131,7 +131,7 @@ void GNAGraphCompiler::fillSplitConnections(InferenceEngine::CNNLayerPtr layer)
if (!dataInput) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Input layer pointer is unexpectedly absent";
}
auto ptrSplitLayerInput = dataInput->getCreatorLayer().lock();
auto ptrSplitLayerInput = getCreatorLayer(dataInput).lock();
if (!ptrSplitLayerInput) {
THROW_GNA_LAYER_EXCEPTION(layer) << "Input layer for is unexpectedly absent";
}
@ -141,7 +141,7 @@ void GNAGraphCompiler::fillSplitConnections(InferenceEngine::CNNLayerPtr layer)
size_t output_layer_size = 0;
for (int j = 0; j != layer->outData[i]->getInputTo().size(); j++) {
for (int j = 0; j != getInputTo(layer->outData[i]).size(); j++) {
auto outFunctionalLayer = CNNNetGetNextLayerSkipCertain(layer, i, j, [](CNNLayerPtr l) {
return LayerInfo(l).isNonFunctional();
});
@ -533,7 +533,7 @@ void GNAGraphCompiler::ConcatPrimitive(InferenceEngine::CNNLayerPtr layer) {
}
auto& concatLayerInfo = concat_connection.find(concatLayer->name)->second;
for (auto &&outLayer : concatLayer->outData.front()->getInputTo()) {
for (auto &&outLayer : getInputTo(concatLayer->outData.front())) {
if ( LayerInfo(outLayer.second).isConcat() ) {
connectOutput(layer, &concatLayerInfo.gna_ptr, concatLayerInfo.reserved_size);
}
@ -555,7 +555,7 @@ void GNAGraphCompiler::ConcatPrimitive(InferenceEngine::CNNLayerPtr layer) {
}
IE_ASSERT(it != concatLayerInput->insData.size());
auto layerInfo = LayerInfo(concatParent);
// auto layerInfo = LayerInfo(concatLayerInput->insData[it].lock()->getCreatorLayer().lock());
// auto layerInfo = LayerInfo(getCreatorLayer(concatLayerInput->insData[it].lock()).lock());
if (layerInfo.isInput()) {
if (concatLayerInfo.input_allocated) {
// for concat input allocated only once, so lets mark this specific input layer also as allocated
@ -632,7 +632,7 @@ void GNAGraphCompiler::CropPrimitive(InferenceEngine::CNNLayerPtr layer) {
connectInput(layer, &cropLayerInfo->second.gna_ptr, cropOutputSize + cropOffset, cropOffset, 0);
// cases for certain output layers
for (auto&& outLayer : layer->outData.front()->getInputTo()) {
for (auto&& outLayer : getInputTo(layer->outData.front())) {
auto& nextLayer = outLayer.second;
if (LayerInfo(nextLayer).isConcat()) {
connectOutput(layer, &cropLayerInfo->second.gna_ptr, cropOutputSize);
@ -1520,7 +1520,7 @@ void GNAGraphCompiler::connectOutput(InferenceEngine::CNNLayerPtr layer, void *p
gnalog() << "Connecting output " << layer->name << " ...\n";
// in case of Memory Layer, its input is allocated in the meminput layer
if (layer->outData.size() == 1) {
for (int j = 0; j != layer->outData.front()->getInputTo().size(); j++) {
for (int j = 0; j != getInputTo(layer->outData.front()).size(); j++) {
auto isNonFunctional = [](CNNLayerPtr l) {
return LayerInfo(l).isNonFunctional();
};
@ -1560,7 +1560,7 @@ void GNAGraphCompiler::connectOutput(InferenceEngine::CNNLayerPtr layer, void *p
// if one of next direct or via split layers is concat...
auto concatChild = [](CNNLayerPtr layer) {
CNNLayerPtr concat;
for (auto &&outLayer : layer->outData.front()->getInputTo()) {
for (auto &&outLayer : getInputTo(layer->outData.front())) {
auto nextLayer = outLayer.second;
if (LayerInfo(nextLayer).isConcat()) {
concat = nextLayer;
@ -1570,7 +1570,7 @@ void GNAGraphCompiler::connectOutput(InferenceEngine::CNNLayerPtr layer, void *p
};
auto splitChild = [](CNNLayerPtr layer) {
std::list<CNNLayerPtr> split;
for (auto &&outLayer : layer->outData.front()->getInputTo()) {
for (auto &&outLayer : getInputTo(layer->outData.front())) {
auto nextLayer = outLayer.second;
if (LayerInfo(nextLayer).isSplit() || LayerInfo(nextLayer).isNonFunctional()) {
split.push_back(nextLayer);

View File

@ -43,7 +43,7 @@ inline bool areEqualDatas(DataPtr source, DataPtr target) {
/// @brief utility to locate input data idx from given outdata and given layer
inline std::vector<int> CNNLayerFindInsDataIdxes(DataPtr sourceData, CNNLayerPtr layer) {
std::vector<int> dataIdxes;
auto outLayers = sourceData->getInputTo();
auto outLayers = getInputTo(sourceData);
for (auto & outLayer : outLayers) {
if (outLayer.second.get() != layer.get()) {
continue;
@ -67,7 +67,7 @@ inline InferenceEngine::CNNLayerPtr CNNNetPrevLayer(const InferenceEngine::CNNL
if (CNNNetHasPrevLayer(layer.get(), idx)) {
auto prevData = layer->insData[idx].lock();
IE_ASSERT(prevData != nullptr);
return prevData->getCreatorLayer().lock();
return getCreatorLayer(prevData).lock();
} else {
THROW_IE_EXCEPTION << "Layer " << layer->name << " has no previous layer";
}
@ -82,7 +82,7 @@ inline InferenceEngine::CNNLayerPtr CNNNetPrevLayer(const InferenceEngine::CNNL
IE_ASSERT(layer != nullptr);
if (CNNNetHasPrevLayer(layer, idx)) {
auto prevData = layer->insData[idx].lock();
return prevData->getCreatorLayer().lock();
return getCreatorLayer(prevData).lock();
} else {
THROW_IE_EXCEPTION << "Layer " << layer->name << " has no previous layer";
}
@ -157,12 +157,12 @@ inline std::pair<InferenceEngine::CNNLayerPtr, int> CNNNetCheckNextLayerSkipCer
if (bOnlyCheck) return {nullptr, 0};
THROW_GNA_LAYER_EXCEPTION(layer) << " no next output layer for outdata: " << oidx;
}
if (iidx >= layer->outData[oidx]->getInputTo().size()) {
if (iidx >= getInputTo(layer->outData[oidx]).size()) {
if (bOnlyCheck) return {nullptr, 0};
THROW_GNA_LAYER_EXCEPTION(layer) << " no next output layer for outdata: " << oidx << " and inputTo index: " << iidx;
}
auto outLayer = layer->outData[oidx]->getInputTo().begin();
auto outLayer = getInputTo(layer->outData[oidx]).begin();
std::advance(outLayer, iidx);
if (!shouldSkip(outLayer->second)) {
@ -194,10 +194,10 @@ inline std::pair<InferenceEngine::CNNLayerPtr, int> CNNNetCheckNextLayerSkipCer
std::vector<std::map<std::string, CNNLayerPtr>> start;
if (oDataIdx == -1) {
for (int i = 0; i != layer->outData.size(); i++) {
start.push_back(layer->outData[i]->getInputTo());
start.push_back(getInputTo(layer->outData[i]));
}
} else {
start.push_back(layer->outData[oDataIdx]->getInputTo());
start.push_back(getInputTo(layer->outData[oDataIdx]));
}
auto separate_layers = [&currentSet, &resultSet, &shouldSkip](std::map<std::string, CNNLayerPtr>& inputTo) {
@ -220,14 +220,14 @@ inline std::pair<InferenceEngine::CNNLayerPtr, int> CNNNetCheckNextLayerSkipCer
}
for (int i = startIdx; i != endIdx; i++) {
separate_layers(layer->outData[i]->getInputTo());
separate_layers(getInputTo(layer->outData[i]));
}
while (!currentSet.empty()) {
auto currentLayer = currentSet.front();
currentSet.pop_front();
for (auto && oData : currentLayer->outData) {
separate_layers(oData->getInputTo());
separate_layers(getInputTo(oData));
}
}
return resultSet;
@ -276,8 +276,8 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
THROW_IE_EXCEPTION << "Unsupported layer for swap operation : " << rhs->name;
}
auto &rhs_outputs = rhs->outData.front()->getInputTo();
auto &lhs_outputs = lhs->outData.front()->getInputTo();
auto &rhs_outputs = getInputTo(rhs->outData.front());
auto &lhs_outputs = getInputTo(lhs->outData.front());
// fixing input layers edges
for (int i = 0; true; i++) {
@ -287,7 +287,7 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
if (prev_lhs == rhs) continue;
for (auto &prev_next : prev_lhs->outData) {
auto lhs_ptr = prev_next->getInputTo().find(lhs->name);
auto lhs_ptr = getInputTo(prev_next).find(lhs->name);
lhs_ptr->second = rhs;
}
}
@ -299,7 +299,7 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
if (prev_rhs == lhs) continue;
for (auto &prev_next : prev_rhs->outData) {
auto lhs_ptr = prev_next->getInputTo().find(rhs->name);
auto lhs_ptr = getInputTo(prev_next).find(rhs->name);
lhs_ptr->second = lhs;
}
}
@ -310,13 +310,13 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
bool hasHrsConnection = false;
for (auto &ins_for_lhs_next : next_lhs.second->insData) {
if (ins_for_lhs_next.lock()->getCreatorLayer().lock() != rhs ) continue;
if (getCreatorLayer(ins_for_lhs_next.lock()).lock() != rhs ) continue;
hasHrsConnection = true;
break;
}
if (!hasHrsConnection) {
for (auto &ins_for_lhs_next : next_lhs.second->insData) {
if (ins_for_lhs_next.lock()->getCreatorLayer().lock() != lhs) continue;
if (getCreatorLayer(ins_for_lhs_next.lock()).lock() != lhs) continue;
ins_for_lhs_next = rhs->outData.front();
}
}
@ -327,13 +327,13 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
bool hasLHSConnection = false;
for (auto &ins_for_rhs_next : next_rhs.second->insData) {
if (ins_for_rhs_next.lock()->getCreatorLayer().lock() != lhs) continue;
if (getCreatorLayer(ins_for_rhs_next.lock()).lock() != lhs) continue;
hasLHSConnection = true;
break;
}
if (!hasLHSConnection) {
for (auto &ins_for_rhs_next : next_rhs.second->insData) {
if (ins_for_rhs_next.lock()->getCreatorLayer().lock() != rhs) continue;
if (getCreatorLayer(ins_for_rhs_next.lock()).lock() != rhs) continue;
ins_for_rhs_next = lhs->outData.front();
}
}
@ -389,7 +389,7 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
InferenceEngine::CNNLayerPtr creator = nullptr;
auto data = weakData.lock();
if (data != nullptr)
creator = data->getCreatorLayer().lock();
creator = getCreatorLayer(data).lock();
interConnectBackL2R |= creator == rhs;
return creator == rhs;
});
@ -400,8 +400,8 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
details::erase_if(rhs->insData, [&interConnectBackR2L, &lhs](DataWeakPtr weakData) {
auto data = weakData.lock();
IE_ASSERT(data != nullptr);
interConnectBackR2L |= data->getCreatorLayer().lock() == lhs;
return data->getCreatorLayer().lock() == lhs;
interConnectBackR2L |= getCreatorLayer(data).lock() == lhs;
return getCreatorLayer(data).lock() == lhs;
});
}
@ -448,7 +448,7 @@ inline void CNNNetworkInsertLayer(CNNLayerPtr after,
--outDataIndex;
continue;
}
auto inputTo = data->getInputTo();
auto inputTo = getInputTo(data);
for (auto inputIt = inputTo.begin(); inputIt != inputTo.end(); ++inputIt) {
auto input = inputIt->second;
if (before != nullptr && input.get() != before.get())
@ -459,26 +459,26 @@ inline void CNNNetworkInsertLayer(CNNLayerPtr after,
input->insData[x] = layerToInsert->outData.front();
}
layerToInsert->outData.front()->getInputTo()[inputIt->first] = input;
getInputTo(layerToInsert->outData.front())[inputIt->first] = input;
bLocated = true;
// erasing only one particular connection
data->getInputTo().erase(inputIt->first);
getInputTo(data).erase(inputIt->first);
if (before != nullptr) {
break;
}
}
if (data->getInputTo().empty()) {
if (getInputTo(data).empty()) {
bLocated = true;
}
if (bLocated) {
// erasing all connection
if (before == nullptr) {
data->getInputTo().clear();
getInputTo(data).clear();
}
data->getInputTo()[layerToInsert->outData.front()->getName()] = layerToInsert;
getInputTo(data)[layerToInsert->outData.front()->getName()] = layerToInsert;
layerToInsert->insData.push_back(data);
break;
}
@ -493,14 +493,14 @@ inline void CNNNetworkInsertLayer(CNNLayerPtr after,
IE_ASSERT(before->insData.size() == 1);
auto prevLayer = after;
for (auto idx = prevLayer->outData.begin(); idx != prevLayer->outData.end(); idx++) {
auto &outputports = (*idx)->getInputTo();
auto &outputports = getInputTo(*idx);
for (auto ll = outputports.begin(); ll != outputports.end(); ll++) {
if (ll->second.get() == before.get()) {
// looks like we found what needs to be removed
outputports.erase(ll);
before->insData.clear();
before->insData.push_back(layerToInsert->outData.front());
layerToInsert->outData.front()->getInputTo()[before->name] = before;
getInputTo(layerToInsert->outData.front())[before->name] = before;
bLocated = true;
break;
@ -516,8 +516,8 @@ inline void CNNNetworkInsertLayer(CNNLayerPtr after,
// inserting into a node that doesn't have a child
IE_ASSERT(!after->outData.empty());
for (auto &&next : after->outData) {
if (!next->getInputTo().empty()) continue;
next->getInputTo()[layerToInsert->name] = layerToInsert;
if (!getInputTo(next).empty()) continue;
getInputTo(next)[layerToInsert->name] = layerToInsert;
layerToInsert->insData.push_back(next);
}
}
@ -580,22 +580,22 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer) {
}
// remove isp->layer connection
for (auto i = isp->getInputTo().begin(); i != isp->getInputTo().end(); i++) {
for (auto i = getInputTo(isp).begin(); i != getInputTo(isp).end(); i++) {
if (i->second.get() == layer.get()) {
isp->getInputTo().erase(i);
getInputTo(isp).erase(i);
break;
}
}
// remove osp->layer connection
for (auto && outData : osp->getInputTo()) {
for (auto && outData : getInputTo(osp)) {
for (auto i = outData.second->insData.begin(); i != outData.second->insData.end(); i++) {
auto insData = i->lock();
if (!insData) {
THROW_IE_EXCEPTION << "Cannot remove layer : "<< layer->name <<", its output layer(" <<
outData.first << " has invalid input configuration";
}
auto creator = insData->getCreatorLayer().lock();
auto creator = getCreatorLayer(insData).lock();
if (!creator) {
THROW_IE_EXCEPTION << "Cannot remove layer : "<< layer->name <<", its output layer(" <<
outData.first << " has invalid input configuration";
@ -610,13 +610,13 @@ inline void CNNNetworkRemoveLayer(CNNLayerPtr layer) {
}
// add isp->osp connections
for (auto && outData : osp->getInputTo()) {
for (auto && outData : getInputTo(osp)) {
// new synthetic name to avoid duplicates in the map
isp->getInputTo()[layer->name + "_" + outData.first] = outData.second;
getInputTo(isp)[layer->name + "_" + outData.first] = outData.second;
}
// add osp->isp connections
for (auto && outData : osp->getInputTo()) {
for (auto && outData : getInputTo(osp)) {
outData.second->insData.push_back(isp);
}
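As a reminder of the structure these helpers manipulate, here is a hedged sketch of how one edge of the graph is wired with the new free functions (the `connect` helper and its argument names are illustrative): the producer owns the `Data` in `outData`, the `Data`'s inputTo map names its consumers, and each consumer keeps a weak `DataPtr` in `insData`.

```cpp
// Sketch only; "connect", "producer", "data" and "consumer" are illustrative names.
#include <ie_layers.h>

void connect(const InferenceEngine::CNNLayerPtr& producer,
             const InferenceEngine::DataPtr& data,
             const InferenceEngine::CNNLayerPtr& consumer) {
    using namespace InferenceEngine;
    getCreatorLayer(data) = producer;             // was: data->getCreatorLayer() = producer
    producer->outData.push_back(data);
    getInputTo(data)[consumer->name] = consumer;  // was: data->getInputTo()[...] = consumer
    consumer->insData.push_back(data);
}
```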

View File

@ -521,7 +521,7 @@ void GNAPlugin::LoadNetwork(ICNNNetwork &network) {
int portId = 0;
for (auto && outPort : outputsDataMap) {
// gets output layer pointer in original topology not in cloned
auto outLayer = outPort.second->getCreatorLayer().lock();
auto outLayer = getCreatorLayer(outPort.second).lock();
// Memory layers are not dnnComponents, hence we need to make a switch with an identity layer
if (outLayer->type == "Memory") {
@ -1261,7 +1261,7 @@ void GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
THROW_GNA_EXCEPTION << "Network is empty (GNA)\n";
}
auto const & secondLayers = inputs.begin()->second->getInputData()->getInputTo();
auto const & secondLayers = getInputTo(inputs.begin()->second->getInputData());
if (secondLayers.empty()) {
THROW_GNA_EXCEPTION << "Network consists of input layer only (GNA)\n";
}

View File

@ -60,7 +60,7 @@ class UpstreamLayersIterator {
if (!data) {
THROW_GNA_EXCEPTION << "Cannot lock insData for layer: " << origin->name;
}
auto parent = data->getCreatorLayer().lock();
auto parent = getCreatorLayer(data).lock();
if (!parent) {
THROW_GNA_EXCEPTION << "Cannot getParent for layer: " << origin->name;
}

View File

@ -131,7 +131,7 @@ class LayerInfo {
}
bool isOutput() const noexcept {
for (auto& out : layer->outData) {
if (out->getInputTo().empty()) {
if (getInputTo(out).empty()) {
return true;
}
}

View File

@ -40,7 +40,7 @@ bool GNAPluginNS::AreLayersSupported(InferenceEngine::ICNNNetwork& network, std:
return false;
}
auto & secondLayers = inputs.begin()->second->getInputData()->getInputTo();
auto & secondLayers = getInputTo(inputs.begin()->second->getInputData());
if (secondLayers.empty()) {
errMessage = "Network consists of input layer only (GNA)\n";
return false;

View File

@ -85,7 +85,7 @@ static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer,
auto diagonalWithQuant = quantized ?
InferenceEngine::injectData<QuantizedLayerParams>(diagLayer) : diagLayer;
dataPtr->getCreatorLayer() = diagonalWithQuant;
getCreatorLayer(dataPtr) = diagonalWithQuant;
diagonalWithQuant->outData.push_back(dataPtr);
// actual insertion
@ -108,7 +108,7 @@ static CNNLayerPtr InsertCopyLayer(CNNLayerPtr prevLayer, CNNLayerPtr nextLayer,
auto copyWithQuant = quantized ?
InferenceEngine::injectData<QuantizedLayerParams>(copyLayer) :
copyLayer;
dataPtr->getCreatorLayer() = copyWithQuant;
getCreatorLayer(dataPtr) = copyWithQuant;
copyWithQuant->outData.push_back(dataPtr);
CNNNetworkInsertLayer(prevLayer, nextLayer, copyWithQuant);
return copyWithQuant;
@ -248,7 +248,7 @@ void HandleMultipleActivationsForTheLayerPass::run() {
CNNLayerSet activations;
for (auto && odata : l->outData) {
for (auto && inputTo : odata->getInputTo()) {
for (auto && inputTo : getInputTo(odata)) {
LayerInfo info(inputTo.second);
if (info.isActivation()) {
@ -291,11 +291,11 @@ void ReorderMaxPoolPass::run() {
void SubstituteSoftSignPass::run() {
auto hasNChildren = [](CNNLayerPtr l, int N){
if (l->outData.size() != 1) return false;
if (l->outData.front()->getInputTo().size() != N) return false;
if (getInputTo(l->outData.front()).size() != N) return false;
return true;
};
auto getNthChild = [](CNNLayerPtr l, int N) {
auto first = l->outData.front()->getInputTo().begin();
auto first = getInputTo(l->outData.front()).begin();
std::advance(first, N);
return first->second;
};
@ -348,14 +348,14 @@ void SubstituteSoftSignPass::run() {
// rebind outdata of mul to be outdata of softsign
for (auto && data : mulData) {
data->getCreatorLayer() = activationLayerWithQuant;
getCreatorLayer(data) = activationLayerWithQuant;
data->setName("softsign_data_" + std::to_string(getPassManager()->getIntVar(softSignLayersCounter)));
activationLayerWithQuant->outData.push_back(data);
}
// making connection l->softsign
l->outData.front()->getInputTo().clear();
l->outData.front()->getInputTo()[layerName] = activationLayerWithQuant;
getInputTo(l->outData.front()).clear();
getInputTo(l->outData.front())[layerName] = activationLayerWithQuant;
// making back connection softsign->mul
activationLayerWithQuant->insData.push_back(l->outData.front());
@ -382,14 +382,14 @@ void SubstitutePReluPass::run() {
CNNLayer* next = nullptr;
if (layer == nullptr) return next;
if (layer->outData.size() != 1) return next;
return layer->outData[0]->getInputTo().begin()->second.get();
return getInputTo(layer->outData[0]).begin()->second.get();
};
// TODO: unit tests for bad cases
for (auto & l : *pLayers) {
// assume l is starting layer, that is followed by eltwise_sum(relu, negate/relu/scale/negate)
if (l->outData.size() != 1) continue;
auto &outputLayers = l->outData[0]->getInputTo();
auto &outputLayers = getInputTo(l->outData[0]);
if (outputLayers.size() != 2) continue;
// one of followed layers need to be generic relu
@ -428,11 +428,11 @@ void SubstitutePReluPass::run() {
auto inData_0 = sum->insData[0].lock();
IE_ASSERT(inData_0 != nullptr);
auto creatorLayer_0 = inData_0->getCreatorLayer().lock();
auto creatorLayer_0 = getCreatorLayer(inData_0).lock();
IE_ASSERT(creatorLayer_0 != nullptr);
auto inData_1 = sum->insData[1].lock();
IE_ASSERT(inData_1 != nullptr);
auto creatorLayer_1 = inData_1->getCreatorLayer().lock();
auto creatorLayer_1 = getCreatorLayer(inData_1).lock();
IE_ASSERT(creatorLayer_1 != nullptr);
auto s1 = creatorLayer_0.get();
@ -452,10 +452,10 @@ void SubstitutePReluPass::run() {
// pointing relu to output of eltwise_summ
relu1->outData = sum->outData;
// changing creator layer
relu1->outData[0]->getCreatorLayer() = relu1;
getCreatorLayer(relu1->outData[0]) = relu1;
// pointing back to relu if any
if (!relu1->outData[0]->getInputTo().empty()) {
auto summOutputLayer = relu1->outData[0]->getInputTo().begin()->second;
if (!getInputTo(relu1->outData[0]).empty()) {
auto summOutputLayer = getInputTo(relu1->outData[0]).begin()->second;
summOutputLayer->insData.clear();
summOutputLayer->insData.push_back(relu1->outData[0]);
}
@ -489,10 +489,10 @@ void ReversePermutationsPass::run() {
if (layer->outData.empty()) {
return nullptr;
}
if (layer->outData.front()->getInputTo().size() != 1) {
if (getInputTo(layer->outData.front()).size() != 1) {
return nullptr;
}
auto next = layer->outData.front()->getInputTo().begin()->second;
auto next = getInputTo(layer->outData.front()).begin()->second;
if (LayerInfo(next).isNonFunctional()) return nextLayerSkipReshape(next);
@ -582,12 +582,12 @@ void InsertIdentityLayerPass::run() {
auto activationLayerWithQuant = quantized ?
InferenceEngine::injectData<QuantizedLayerParams>(activationLayer) :
activationLayer;
dataPtr->getCreatorLayer() = activationLayerWithQuant;
getCreatorLayer(dataPtr) = activationLayerWithQuant;
activationLayerWithQuant->outData.push_back(dataPtr);
// whether 1 identity or all outputs; TODO: possible grouping here, need to implement a special grouped inserter
bool notAll = false;
for (auto && nextData : prev->outData) {
for (auto && nextLayer : nextData->getInputTo()) {
for (auto && nextLayer : getInputTo(nextData)) {
if (nextLayer.second.get() == l.get())
continue;
if (getCandidatesForIdentityInsertion(nextLayer.second).empty()) {
@ -622,7 +622,7 @@ void InsertCopyLayerPass::run() {
if (LayerInfo(l).isMemory()) {
if (LayerInfo(prevIndirectLayer).isConcat()) { bInsert = true;}
// memory is usually preceded by either activation or split, or other layers, in order to have 2b precision
for (auto && inputto : prevLayers[i].first->outData[prevLayers[i].second]->getInputTo()) {
for (auto && inputto : getInputTo(prevLayers[i].first->outData[prevLayers[i].second])) {
// if preceding layer is common for memory and concat
if (LayerInfo(inputto.second).isConcat()) {
bInsert = true;
@ -687,7 +687,7 @@ void InsertConcatAligningFilterPass::run() {
auto useAlignFilterIf = [&concatLayer, &getLayerByIndex](int concat_input_idx) {
if (concatLayer->insData.size() <= concat_input_idx) return false;
auto nextInput = getLayerByIndex(concat_input_idx)->getCreatorLayer().lock();
auto nextInput = getCreatorLayer(getLayerByIndex(concat_input_idx)).lock();
if (LayerInfo(nextInput).isInput()) return false;
@ -697,7 +697,7 @@ void InsertConcatAligningFilterPass::run() {
// correcting offset by copy layer insertion. This can be improved by collapsing copy and affine or diagonal later-on
// if next concat inputs requires align filter - then current input also requires either copy or align filter
if (ALIGN64(offset) != offset || (ALIGN64(outputSize) != outputSize && useAlignFilterIf(input_idx + 1))) {
auto prevLayer = concatInput->getCreatorLayer().lock();
auto prevLayer = getCreatorLayer(concatInput).lock();
// input layer parameters are copied without using GNA primitives - so nothing to align here.
if (!useAlignFilterIf(input_idx)) continue;
@ -755,7 +755,7 @@ void InsertConcatAligningFilterPass::run() {
auto filterWithQuant = quantized ?
InferenceEngine::injectData<QuantizedLayerParams>(concatAligningFilter) :
concatAligningFilter;
outData->getCreatorLayer() = filterWithQuant;
getCreatorLayer(outData) = filterWithQuant;
filterWithQuant->outData.push_back(outData);
CNNNetworkInsertLayer(prevLayer, l, filterWithQuant);
@ -784,7 +784,7 @@ void ReorderConcatInputsPass::run() {
if (l->outData.size() != 1) {
THROW_GNA_EXCEPTION << "no concat layer after concat aligning layer" << l->name;
}
auto nextLayers = l->outData.front()->getInputTo();
auto nextLayers = getInputTo(l->outData.front());
if (nextLayers.size() != 1) {
THROW_GNA_EXCEPTION << "Invalid concat connection in align filter : " << l->name;
@ -842,15 +842,15 @@ void ReorderConcatInputsPass::run() {
TensorDesc(Precision::FP32,
SizeVector({1}),
Layout::C));
linkOutData->getCreatorLayer() = link;
getCreatorLayer(linkOutData) = link;
link->outData.push_back(linkOutData);
link->insData.push_back(l->outData.front());
linkOutData->getInputTo()[firstInputToConcat->name + ".via.link"] = firstInputToConcat;
getInputTo(linkOutData)[firstInputToConcat->name + ".via.link"] = firstInputToConcat;
firstInputToConcat->insData.push_back(linkOutData);
l->outData.front()->getInputTo()[linkName] = link;
getInputTo(l->outData.front())[linkName] = link;
}
}
@ -876,8 +876,8 @@ void InsertSplitAligningFilterPass::run() {
#ifdef PLOT
// getting list of layers attached to current split output
gnalog() << "Inserted Affine Filter Layer between: " << l->name << " and ";
for (auto &&followingLayers : splitOutput->getInputTo()) {
if (splitOutput->getInputTo().size() != 1) {
for (auto &&followingLayers : getInputTo(splitOutput)) {
if (getInputTo(splitOutput).size() != 1) {
gnalog() << "\n ";
}
gnalog() << followingLayers.second->name;
@ -931,7 +931,7 @@ void InsertSplitAligningFilterPass::run() {
auto filterWithQuant = quantized ?
InferenceEngine::injectData<QuantizedLayerParams>(filterLayer) :
filterLayer;
outData->getCreatorLayer() = filterWithQuant;
getCreatorLayer(outData) = filterWithQuant;
filterWithQuant->outData.push_back(outData);
CNNNetworkInsertLayer(l, nullptr, filterWithQuant, splitOutIndex);
}
@ -1113,9 +1113,9 @@ void FuseMultipleIdentitiesPass::run() {
} else {
// just figure out how to connect to that "already identity"
// 1st stage - disconnect given layer from previous
auto directPrev = l->insData.front().lock()->getCreatorLayer().lock();
auto directPrev = getCreatorLayer(l->insData.front().lock()).lock();
auto oDataIdx = CNNLayerFindOutDataIdx(directPrev, 0);
auto &inputTo = directPrev->outData[oDataIdx]->getInputTo();
auto &inputTo = getInputTo(directPrev->outData[oDataIdx]);
for (auto inIterator = inputTo.begin(); inIterator != inputTo.end(); inIterator++) {
if (inIterator->second == l) {
inputTo.erase(inIterator);
@ -1126,7 +1126,7 @@ void FuseMultipleIdentitiesPass::run() {
//2nd stage - now setting up new connection
l->insData.push_back(alreadyIdentity->outData.front());
alreadyIdentity->outData.front()->getInputTo()[l->name] = l;
getInputTo(alreadyIdentity->outData.front())[l->name] = l;
}
}
}

View File

@ -33,7 +33,7 @@ void translateVisitLayer(VisitedLayersMap& visited,
}
visited.insert({layer, node});
for (auto&& data : layer->outData) {
for (auto&& layerIt : data->getInputTo()) {
for (auto&& layerIt : getInputTo(data)) {
auto nextLayer = layerIt.second;
auto it = visited.find(nextLayer);
if (visited.end() == it) {
@ -51,7 +51,7 @@ void translateNetworkToAde(ade::Graph& gr, ICNNNetwork& network) {
VisitedLayersMap visited;
for (auto& data : getRootDataObjects(network)) {
assert(nullptr != data);
for (auto& layerIt : data->getInputTo()) {
for (auto& layerIt : getInputTo(data)) {
auto layer = layerIt.second;
assert(nullptr != layer);
if (!ade::util::contains(visited, layer)) {

View File

@ -44,7 +44,7 @@ namespace {
void forward(const CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers) {
for (const auto& out : layer->outData) {
for (const auto& out_link : out->getInputTo()) {
for (const auto& out_link : getInputTo(out)) {
const auto& nextLayer = out_link.second;
if (nullptr != nextLayer) {
layers.emplace_back(nextLayer);
@ -82,7 +82,7 @@ void traverse(InferenceEngine::ICNNNetwork& network,
network.getInputsInfo(inputs);
for (const auto& input : inputs) {
const auto data = input.second->getInputData();
for (const auto& to : data->getInputTo()) {
for (const auto& to : getInputTo(data)) {
const auto nextLayer = to.second;
assert(nullptr != nextLayer);
layers.emplace_back(nextLayer);
@ -868,7 +868,7 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) {
auto outputInfo = subnetwork._clonedNetwork.getOutputsInfo();
for (auto&& output : outputInfo) {
auto outputNode = subnetworkOutputsNode.append_child("output");
auto creator = output.second->getCreatorLayer().lock();
auto creator = getCreatorLayer(output.second).lock();
outputNode.append_attribute("creatorName").set_value(creator->name.c_str());
outputNode.append_attribute("name").set_value(output.first.c_str());
outputNode.append_attribute("precision").set_value(output.second->getPrecision().name());

View File

@ -197,7 +197,7 @@ void sortSubgraphs(std::vector<LayersSet>& subgraphs) {
for (auto&& dataIt : layer->insData) {
auto data = dataIt.lock();
assert(nullptr != data);
auto prevLayer = data->getCreatorLayer().lock();
auto prevLayer = getCreatorLayer(data).lock();
if (nullptr != prevLayer) {
for (auto j : ade::util::iota(subgraphs.size())) {
if (i != j) {

View File

@ -149,20 +149,9 @@ public:
network = nullptr;
}
CNNLayerWeakPtr& getCreatorLayer() override {
if (Data::getCreatorLayer().lock() == nullptr && network != nullptr) {
network->convertToCNNNetworkImpl();
}
return Data::getCreatorLayer();
}
CNNLayerWeakPtr& getCreatorLayer();
std::map<std::string, CNNLayerPtr>& getInputTo() override {
if (Data::getInputTo().empty() && network != nullptr) {
network->convertToCNNNetworkImpl();
}
return Data::getInputTo();
}
std::map<std::string, CNNLayerPtr>& getInputTo();
private:
CNNNetworkNGraphImpl* network;

View File

@ -2,14 +2,14 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ie_data.h"
#include "ie_layers.h"
#include <map>
#include <memory>
#include <string>
#include "blob_factory.hpp"
#include "ie_layers.h"
#include "cnn_network_ngraph_impl.hpp"
using namespace InferenceEngine;
@ -17,10 +17,27 @@ Blob::Ptr Blob::CreateFromData(const DataPtr& data) {
return CreateBlobFromData(data);
}
Data::Data(const std::string& name, Precision _precision, Layout layout)
: name(name), userObject({0}), tensorDesc(_precision, layout) {}
struct Data::Impl {
/**
* @brief A pointer to the layer that creates this data element, null for input data elements
*/
CNNLayerWeakPtr creatorLayer;
/**
* @brief A map of layers that use this node as input.
* It is useful for recursive NN graph traversal.
*/
std::map<std::string, CNNLayerPtr> inputTo;
};
Data::Data(const std::string& name, const TensorDesc& desc): name(name), userObject({0}), tensorDesc(desc) {}
Data::Data(const std::string& name, Precision _precision, Layout layout)
: name(name), userObject({0}), tensorDesc(_precision, layout) {
_impl = std::make_shared<Impl>();
}
Data::Data(const std::string& name, const TensorDesc& desc): name(name), userObject({0}), tensorDesc(desc) {
_impl = std::make_shared<Impl>();
}
const Precision& Data::getPrecision() const {
return tensorDesc.getPrecision();
@ -46,8 +63,24 @@ void Data::reshape(const SizeVector& a_dims, Layout a_layout) {
tensorDesc.reshape(a_dims, a_layout);
}
CNNLayerWeakPtr& Data::getCreatorLayer() {
return creatorLayer;
Data::Data(const Data& data) :
name(data.name), userObject(data.userObject), tensorDesc(data.tensorDesc) {
_impl = std::make_shared<Impl>();
_impl->creatorLayer = data._impl->creatorLayer;
_impl->inputTo = data._impl->inputTo;
}
Data & Data::operator = (const Data& data) {
if (this != &data) {
name = data.name;
userObject = data.userObject;
tensorDesc = data.tensorDesc;
_impl->creatorLayer = data._impl->creatorLayer;
_impl->inputTo = data._impl->inputTo;
}
return *this;
}
const std::string& Data::getName() const {
@ -58,10 +91,6 @@ void Data::setName(const std::string& newName) {
name = newName;
}
std::map<std::string, CNNLayerPtr>& Data::getInputTo() {
return inputTo;
}
const UserValue& Data::getUserObject() const {
return userObject;
}
@ -77,3 +106,44 @@ void Data::setPrecision(const Precision& precision) {
const SizeVector& Data::getDims() const {
return tensorDesc.getDims();
}
// compatibility
CNNLayerWeakPtr& InferenceEngine::getCreatorLayer(const DataPtr & data) {
if (auto ndata = std::dynamic_pointer_cast<details::NGraphData>(data)) {
return ndata->getCreatorLayer();
} else {
return data->_impl->creatorLayer;
}
}
std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(const DataPtr & data) {
if (auto ndata = std::dynamic_pointer_cast<details::NGraphData>(data)) {
return ndata->getInputTo();
} else {
return data->_impl->inputTo;
}
}
std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(Data * data) {
if (auto ndata = dynamic_cast<details::NGraphData *>(data)) {
return ndata->getInputTo();
} else {
return data->_impl->inputTo;
}
}
CNNLayerWeakPtr& details::NGraphData::getCreatorLayer() {
if (_impl->creatorLayer.lock() == nullptr && network != nullptr) {
network->convertToCNNNetworkImpl();
}
return _impl->creatorLayer;
}
std::map<std::string, CNNLayerPtr>& details::NGraphData::getInputTo() {
if (_impl->inputTo.empty() && network != nullptr) {
network->convertToCNNNetworkImpl();
}
return _impl->inputTo;
}
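A hedged usage sketch of the compatibility shim above (the function name and model paths are illustrative): for an nGraph-based network the first call to `getCreatorLayer`/`getInputTo` on an `NGraphData` falls back to `convertToCNNNetworkImpl()`, so legacy callers keep working without ever touching `_impl` directly.

```cpp
// Sketch only; printOutputCreators, xmlPath and binPath are illustrative.
#include <iostream>
#include <string>
#include <ie_core.hpp>
#include <ie_layers.h>

void printOutputCreators(const std::string& xmlPath, const std::string& binPath) {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork(xmlPath, binPath);
    for (auto& out : network.getOutputsInfo()) {
        // For an IR v10 / nGraph network this may trigger the lazy conversion
        // implemented in details::NGraphData::getCreatorLayer() above.
        auto creator = InferenceEngine::getCreatorLayer(out.second).lock();
        if (creator)
            std::cout << out.first << " <- " << creator->name << std::endl;
    }
}
```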

View File

@ -10,6 +10,7 @@
#include <string>
#include <vector>
#include "ie_layers.h"
#include "ie_ishape_infer_extension.hpp"
#include "description_buffer.hpp"
#include "ie_api.h"

View File

@ -14,6 +14,7 @@
#include <utility>
#include "ie_api.h"
#include "ie_layers.h"
#include "ie_icnn_network.hpp"
#include "ie_locked_memory.hpp"
@ -57,7 +58,7 @@ public:
InputsDataMap inputs;
network->getInputsInfo(inputs);
if (!inputs.empty()) {
auto& nextLayers = inputs.begin()->second->getInputData()->getInputTo();
auto& nextLayers = getInputTo(inputs.begin()->second->getInputData());
if (!nextLayers.empty()) {
currentLayer = nextLayers.begin()->second;
nextLayersTovisit.push_back(currentLayer);
@ -134,7 +135,7 @@ private:
// visit child that not visited
for (auto&& output : nextLayer->outData) {
for (auto&& child : output->getInputTo()) {
for (auto&& child : getInputTo(output)) {
if (visited.find(child.second.get()) == visited.end()) {
nextLayersTovisit.push_back(child.second);
visited.insert(child.second.get());
@ -144,7 +145,7 @@ private:
// visit parents
for (auto&& parent : nextLayer->insData) {
auto parentLayer = parent.lock()->getCreatorLayer().lock();
auto parentLayer = getCreatorLayer(parent.lock()).lock();
if (parentLayer && visited.find(parentLayer.get()) == visited.end()) {
nextLayersTovisit.push_back(parentLayer);
visited.insert(parentLayer.get());

View File

@ -74,7 +74,7 @@ public:
return;
}
currentIterator++;
if (currentIterator != dataCntIteratorCurrent->get()->getInputTo().end()) {
if (currentIterator != getInputTo(dataCntIteratorCurrent->get()).end()) {
return;
}
@ -90,8 +90,8 @@ protected:
void moveToNextNonEmptyData() {
pointingToEnd = true;
for (; dataCntIteratorCurrent != dataCntIteratorEnd; dataCntIteratorCurrent++) {
if (!dataCntIteratorCurrent->get()->getInputTo().empty()) {
currentIterator = dataCntIteratorCurrent->get()->getInputTo().begin();
if (!getInputTo(dataCntIteratorCurrent->get()).empty()) {
currentIterator = getInputTo(dataCntIteratorCurrent->get()).begin();
pointingToEnd = false;
break;
}
@ -183,7 +183,7 @@ inline void UnorderedDFS(std::unordered_set<CNNLayer*>& visited, const Inference
// visit childs
for (auto& od : cnnLayer->outData) {
for (auto nl : od->getInputTo()) {
for (auto nl : getInputTo(od)) {
layers.push(nl.second);
}
}
@ -194,7 +194,7 @@ inline void UnorderedDFS(std::unordered_set<CNNLayer*>& visited, const Inference
if (!input.lock()) {
THROW_IE_EXCEPTION << "Data " << i << " inserted into layer " << cnnLayer->name << " is nullptr";
} else {
auto creatorLayer = input.lock()->getCreatorLayer().lock();
auto creatorLayer = getCreatorLayer(input.lock()).lock();
if (creatorLayer) {
layers.push(creatorLayer);
}
@ -225,7 +225,7 @@ inline void BFS(InferenceEngine::CNNLayerPtr layer, const T& visit, int maxDepth
for (; !nextLayers.empty() && maxDepth != 0;) {
visit(*nextLayers.begin());
for (auto& od : (*nextLayers.begin())->outData) {
for (auto nl : od->getInputTo()) {
for (auto nl : getInputTo(od)) {
if (visited.find(nl.second.get()) == visited.end()) {
nextLayers.push_back(nl.second);
visited.insert(nl.second.get());
@ -269,7 +269,7 @@ template <class T>
inline bool CNNNetForestDFS(const std::vector<DataPtr>& heads, const T& visit, bool bVisitBefore) {
std::unordered_map<CNNLayer*, bool> visited;
for (const auto& in : heads) {
for (const auto& to : in->getInputTo()) {
for (const auto& to : getInputTo(in)) {
if (visited.find(to.second.get()) != visited.end()) continue;
if (!details::DFS(visited, to.second, visit, bVisitBefore)) {
return false;
@ -345,7 +345,7 @@ inline bool CNNNetHasPrevLayer(const InferenceEngine::CNNLayer* layer, int idx =
return false;
}
auto prevData = layer->insData[idx].lock();
return !!prevData->getCreatorLayer().lock();
return !!getCreatorLayer(prevData).lock();
}
/**
@ -376,7 +376,7 @@ inline CNNLayerSet CNNNetGetAllInputLayers(const ICNNNetwork& network) {
if (inputs.empty()) return inputLayers;
for (const auto& input : inputs) {
auto& secondLayers = input.second->getInputData()->getInputTo();
auto& secondLayers = getInputTo(input.second->getInputData());
if (secondLayers.empty()) continue;
@ -476,7 +476,7 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
// internal utility to locate out data idx in layer
auto findOutDataIdx = [&](DataPtr sourceData) {
int dataIdx = -1;
auto sourceLayer = sourceData->getCreatorLayer().lock();
auto sourceLayer = getCreatorLayer(sourceData).lock();
if (!sourceLayer) {
THROW_IE_EXCEPTION << "Data " << sourceData->getName() << " has no creator layer";
}
@ -515,7 +515,7 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
// internal utility to locate input data idx in layer
auto findInsDataIdx = [&](DataPtr sourceData, CNNLayerPtr layer) {
int dataIdx = -1;
auto sourceLayerMap = sourceData->getInputTo();
auto sourceLayerMap = getInputTo(sourceData);
for (auto& layersMapping : sourceLayerMap) {
if (layersMapping.second.get() != layer.get()) {
continue;
@ -540,13 +540,13 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
auto newLayer = oldToNewLayers[current.get()];
// remap output data
for (size_t i = 0; i != current->outData.size(); i++) {
newLayer->outData[i]->getCreatorLayer() = CNNLayerWeakPtr(newLayer);
getCreatorLayer(newLayer->outData[i]) = CNNLayerWeakPtr(newLayer);
// transfer data info for getData routine
net->getData(newLayer->outData[i]->getName()) = newLayer->outData[i];
for (auto inputTo = std::begin(newLayer->outData[i]->getInputTo());
inputTo != std::end(newLayer->outData[i]->getInputTo()); inputTo++) {
for (auto inputTo = std::begin(getInputTo(newLayer->outData[i]));
inputTo != std::end(getInputTo(newLayer->outData[i])); inputTo++) {
inputTo->second = oldToNewLayers[inputTo->second.get()];
}
}
@ -554,7 +554,7 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
for (size_t i = 0; i != current->insData.size(); i++) {
// found that data IDX
auto sourceData = current->insData[i].lock();
auto sourceLayer = sourceData->getCreatorLayer().lock();
auto sourceLayer = getCreatorLayer(sourceData).lock();
if (!sourceLayer) {
THROW_IE_EXCEPTION << "Data " << sourceData->getName() << " has no creator layer";
}
@ -569,7 +569,7 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
input.getInputsInfo(inputsInfo);
std::set<DataPtr> insDatas;
for (auto&& info : inputsInfo) {
for (auto secondLayer : info.second->getInputData()->getInputTo()) {
for (auto secondLayer : getInputTo(info.second->getInputData())) {
auto secondLayerNew = oldToNewLayers[secondLayer.second.get()];
InputInfo::Ptr infoNew = std::make_shared<InputInfo>();
infoNew->setInputData(
@ -584,7 +584,7 @@ inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
input.getOutputsInfo(outmap);
for (auto&& data : outmap) {
ResponseDesc dsc;
if (OK != net->addOutput(data.second->getCreatorLayer().lock()->name, findOutDataIdx(data.second), &dsc)) {
if (OK != net->addOutput(getCreatorLayer(data.second).lock()->name, findOutDataIdx(data.second), &dsc)) {
THROW_IE_EXCEPTION << dsc.msg;
}
}
@ -647,7 +647,7 @@ inline std::vector<DataPtr> CNNSubnetGetAllInputs(const std::vector<DataPtr>& he
// Define all start layers
for (const auto& data : heads) {
auto& secondLayers = data->getInputTo();
auto& secondLayers = getInputTo(data);
if (secondLayers.empty()) continue;
@ -666,7 +666,7 @@ inline std::vector<DataPtr> CNNSubnetGetAllInputs(const std::vector<DataPtr>& he
// layers from head (like const placeholders)
for (auto& starter : inputLayers) {
DataPtr holder(new Data(starter->name + ":input_holder", starter->precision));
holder->getInputTo()[starter->name] = starter;
getInputTo(holder)[starter->name] = starter;
res.push_back(holder);
}

View File

@ -23,6 +23,14 @@
#include "ie_data.h"
#include "ie_layers_property.hpp"
#if defined IMPLEMENT_INFERENCE_ENGINE_API || defined IMPLEMENT_INFERENCE_ENGINE_PLUGIN
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#else
# define INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(...) \
INFERENCE_ENGINE_INTERNAL("Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2021.1") \
INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
#endif
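// Illustrative sketch ("MyCustomLayer" is a hypothetical name, assuming the declaration
// sits inside namespace InferenceEngine where CNNLayer is visible): a layer declared
// through the macro. Builds that define IMPLEMENT_INFERENCE_ENGINE_API/PLUGIN see a
// plain exported class, while external code additionally gets the "Migrate to IR v10"
// deprecation warning defined above.
class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(MyCustomLayer): public CNNLayer {
public:
    using CNNLayer::CNNLayer;  // inherit the CNNLayer(const LayerParams&) constructor
};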
namespace ngraph {
class Node;
@ -379,6 +387,11 @@ IE_SUPPRESS_DEPRECATED_START
using GenericLayer = class CNNLayer;
IE_SUPPRESS_DEPRECATED_END
INFERENCE_ENGINE_API_CPP(CNNLayerWeakPtr&) getCreatorLayer(const DataPtr & data);
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(const DataPtr & data);
INFERENCE_ENGINE_API_CPP(std::map<std::string, CNNLayerPtr>&) getInputTo(Data * data);
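// Minimal migration sketch for the accessors declared above (the function and variable
// names are illustrative, assuming this header is included): call sites switch from the
// deprecated Data member functions to the free functions, keeping the same semantics.
inline void wireConsumer(const InferenceEngine::DataPtr& data,
                         const InferenceEngine::CNNLayerPtr& consumer) {
    // was: data->getCreatorLayer().lock()
    InferenceEngine::CNNLayerPtr producer = getCreatorLayer(data).lock();
    // was: data->getInputTo()[consumer->name] = consumer;
    getInputTo(data)[consumer->name] = consumer;
    (void)producer;  // e.g. inspect producer->name or producer->outData exactly as before
}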
IE_SUPPRESS_DEPRECATED_START_WIN
/**

View File

@ -48,7 +48,7 @@ std::map<CNNLayer*, bool> getConstLayersMap(const ICNNNetwork& network) {
THROW_IE_EXCEPTION << "input data is absent";
}
const CNNLayerWeakPtr parentWeak = insData->getCreatorLayer();
const CNNLayerWeakPtr parentWeak = getCreatorLayer(insData);
const CNNLayerPtr parent = parentWeak.lock();
if (parent == nullptr) {
THROW_IE_EXCEPTION << "parentLayer is absent";
@ -92,7 +92,7 @@ CNNNetworkImpl::~CNNNetworkImpl() {
if (!res) {
for (const auto& data : _data) {
if (!data.second) continue;
for (auto& input : data.second->getInputTo()) {
for (auto& input : getInputTo(data.second)) {
if (!input.second) continue;
input.second.reset();
}
@ -149,7 +149,7 @@ void CNNNetworkImpl::renameLayer(const std::string& currentName, const std::stri
bool wasUpdatedInput = false;
for (auto inputDataIt = _inputData.begin(); inputDataIt != _inputData.end(); ++inputDataIt) {
const CNNLayerPtr inputLayer = inputDataIt->second->getInputData()->getCreatorLayer().lock();
const CNNLayerPtr inputLayer = getCreatorLayer(inputDataIt->second->getInputData()).lock();
if (inputLayer->name == currentName) {
_inputData.emplace(newName, inputDataIt->second);
_inputData.erase(inputDataIt);
@ -160,7 +160,7 @@ void CNNNetworkImpl::renameLayer(const std::string& currentName, const std::stri
if (!wasUpdatedInput) {
for (auto outputDataIt = _outputData.begin(); outputDataIt != _outputData.end(); ++outputDataIt) {
const CNNLayerPtr outputLayer = outputDataIt->second->getCreatorLayer().lock();
const CNNLayerPtr outputLayer = getCreatorLayer(outputDataIt->second).lock();
if (outputLayer->name == currentName) {
_outputData.emplace(newName, outputDataIt->second);
_outputData.erase(outputDataIt);
@ -203,14 +203,14 @@ void CNNNetworkImpl::validate(int version) {
for (auto i : layer->insData) {
auto data = i.lock();
if (data) {
auto inputTo = data->getInputTo();
auto inputTo = getInputTo(data);
auto iter = inputTo.find(layerName);
auto dataName = data->getName();
if (iter == inputTo.end()) {
THROW_IE_EXCEPTION << "Data " << data->getName() << " which inserted into the layer "
<< layerName << " does not point at this layer";
}
if (!data->getCreatorLayer().lock()) {
if (!getCreatorLayer(data).lock()) {
THROW_IE_EXCEPTION << "Data " << dataName << " has no creator layer";
}
} else {
@ -218,7 +218,7 @@ void CNNNetworkImpl::validate(int version) {
}
}
for (auto data : layer->outData) {
auto inputTo = data->getInputTo();
auto inputTo = getInputTo(data);
std::string dataName = data->getName();
for (auto layerIter : inputTo) {
CNNLayerPtr layerInData = layerIter.second;
@ -250,7 +250,7 @@ void CNNNetworkImpl::validate(int version) {
std::string inputType = "Input";
for (auto i : inputs) {
CNNLayerPtr layer = i.second->getInputData()->getCreatorLayer().lock();
CNNLayerPtr layer = getCreatorLayer(i.second->getInputData()).lock();
if (layer && !equal(layer->type, inputType)) {
THROW_IE_EXCEPTION << "Input layer " << layer->name << " should have Input type but actually its type is "
<< layer->type;
@ -290,7 +290,7 @@ void CNNNetworkImpl::resolveOutput() {
THROW_IE_EXCEPTION << "data name [" << kvp.first << "] dimensions is not known";
// data nodes not going to any layer are basically graph output...
if (kvp.second->getInputTo().empty()) {
if (getInputTo(kvp.second).empty()) {
_outputData[kvp.first] = kvp.second;
}
}
@ -392,7 +392,7 @@ StatusCode CNNNetworkImpl::setBatchSize(size_t size, ResponseDesc* responseDesc)
const std::map<CNNLayer*, bool> layersMap = getConstLayersMap(*this);
for (auto& layer : _data) {
SizeVector dims = layer.second->getDims();
CNNLayerPtr layerT = layer.second->getCreatorLayer().lock();
CNNLayerPtr layerT = getCreatorLayer(layer.second).lock();
bool constOrAbsent;
if (layerT) {

View File

@ -821,7 +821,7 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
TensorDesc::getLayoutByDims(dims)}));
}
ptr->getCreatorLayer() = cnnLayer;
getCreatorLayer(ptr) = cnnLayer;
cnnLayer->outData.push_back(ptr);
if (std::dynamic_pointer_cast<::ngraph::op::Parameter>(layer)) {
keep_input_info(cnnNetworkImpl, ptr);
@ -875,7 +875,7 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
<< " (max " << prevCnnLayer->outData.size() << ") of " << prevCnnLayer->type
<< " layer " << prevCnnLayer->name;
cnnLayer->insData[inIndex - count_of_skipped] = prevCnnLayer->outData[output_port.get_index()];
prevCnnLayer->outData[output_port.get_index()]->getInputTo()[cnnLayer->name] = cnnLayer;
getInputTo(prevCnnLayer->outData[output_port.get_index()])[cnnLayer->name] = cnnLayer;
}
}

View File

@ -30,7 +30,7 @@ namespace InferenceEngine {
bool isForFakeQuantize(const CNNLayer& layer) {
for (const DataPtr data : layer.outData) {
for (const auto it : data->getInputTo()) {
for (const auto it : getInputTo(data)) {
const CNNLayerPtr childLayer = it.second;
if (childLayer->type == "FakeQuantize" || childLayer->type == "Quantize") {
return true;
@ -98,12 +98,12 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
if (constLayers.find(layer->name) != constLayers.end()) {
// const layer doesn't need parent connections -> erase them
for (const auto& insData : layer->insData) {
auto& inputTo = insData.lock()->getInputTo();
auto& inputTo = getInputTo(insData.lock());
inputTo.erase(layer->name);
// Note: to resolve the corner case above, layers can be marked as const with const data just so they are removed
// properly; maybe this logic wouldn't be needed then
if (inputTo.empty()) {
auto creator = insData.lock()->getCreatorLayer().lock();
auto creator = getCreatorLayer(insData.lock()).lock();
auto it = std::find(creator->outData.begin(), creator->outData.end(), insData.lock());
if (it != creator->outData.end()) {
data_to_remove.push_back(*it);
@ -115,7 +115,7 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
if (constLayers.at(layer->name)) {
for (const auto& outData : layer->outData) {
for (const auto& inputTo : outData->getInputTo()) {
for (const auto& inputTo : getInputTo(outData)) {
CNNLayerPtr inputToLayer;
std::string inputToName;
std::tie(inputToName, inputToLayer) = inputTo;
@ -139,7 +139,7 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
bool keepConstData = layer->outData.size() == 1;
if (keepConstData) {
auto outData = layer->outData[0];
for (const auto& inputTo : outData->getInputTo()) {
for (const auto& inputTo : getInputTo(outData)) {
if (constLayers.find(inputTo.first) != constLayers.end()) {
keepConstData = false;
}
@ -162,7 +162,7 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
layer->precision};
auto newLayer = std::make_shared<CNNLayer>(layerParams);
for (const auto& data : layer->outData) {
data->getCreatorLayer() = newLayer;
getCreatorLayer(data) = newLayer;
}
newLayer->outData = layer->outData;
newLayer->blobs["custom"] = layer->blobs["custom"];
@ -176,7 +176,7 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
}
} else {
for (const auto& outData : layer->outData) {
for (const auto& inputTo : outData->getInputTo()) {
for (const auto& inputTo : getInputTo(outData)) {
CNNLayerPtr inputToLayer;
std::string inputToName;
std::tie(inputToName, inputToLayer) = inputTo;
@ -203,8 +203,8 @@ std::vector<CNNLayerPtr> ConstTransformer::foldConstSubgraphsInternal(const std:
}
auto newData = std::make_shared<Data>(outData->getName() + "__" + inputToName,
outData->getTensorDesc());
newData->getCreatorLayer() = newLayer;
newData->getInputTo()[inputToName] = inputToLayer;
getCreatorLayer(newData) = newLayer;
getInputTo(newData)[inputToName] = inputToLayer;
newLayer->outData = {newData};
layer_to_add.push_back(newLayer);
data_to_add.push_back(newData);
@ -247,7 +247,7 @@ const std::map<std::string, bool> ConstTransformer::getConstLayers(const std::ve
!isForFakeQuantize(*layer)) {
bool isAllInputsConst = true;
for (auto const& data : layer->insData) {
auto creator = data.lock()->getCreatorLayer().lock();
auto creator = getCreatorLayer(data.lock()).lock();
if (creator != nullptr) {
if (mapConstLayers.find(creator->name) == mapConstLayers.end()) {
isAllInputsConst = false;
@ -271,7 +271,7 @@ const std::map<std::string, bool> ConstTransformer::getConstLayers(const std::ve
for (int i = 0; i < currentLayer->insData.size(); i++) {
std::string creatorName;
if (currentLayer->insData[i].lock() != nullptr) {
auto creator = currentLayer->insData[i].lock()->getCreatorLayer().lock();
auto creator = getCreatorLayer(currentLayer->insData[i].lock()).lock();
if (creator) {
creatorName = creator->name;
}
@ -391,7 +391,7 @@ static CNNLayerPtr replace_with_static_reshape(CNNLayerPtr &layer) {
reshape->shape = std::vector<int>(shape.begin(), shape.end());
// replacement
auto &input_to_map = in_data->getInputTo();
auto &input_to_map = getInputTo(in_data);
// try to find by name
auto found_by_name = input_to_map.find(layer->name);
@ -409,7 +409,7 @@ static CNNLayerPtr replace_with_static_reshape(CNNLayerPtr &layer) {
reshape->insData = {in_data};
reshape->outData = {out_data};
out_data->getCreatorLayer() = reshape;
getCreatorLayer(out_data) = reshape;
return reshape;
}
@ -419,7 +419,7 @@ void ConstTransformer::trimShapeInputs(const std::vector<CNNLayerPtr>& constLaye
for (const auto& layer : constLayers) {
if (layer->outData.size() == 1 && layer->type == "Const" && layer->insData.empty()) {
auto constData = layer->outData[0];
std::map<std::string, CNNLayerPtr> inputToMap = constData->getInputTo();
std::map<std::string, CNNLayerPtr> inputToMap = getInputTo(constData);
for (const auto& inputTo : inputToMap) {
CNNLayerPtr inputToLayer = inputTo.second;
if (shapeTaking.find(inputToLayer->type) != shapeTaking.end()) {
@ -429,11 +429,11 @@ void ConstTransformer::trimShapeInputs(const std::vector<CNNLayerPtr>& constLaye
});
if (it != insData.end() && std::distance(insData.begin(), it) == 1) {
inputToLayer->insData.erase(it);
constData->getInputTo().erase(inputTo.first);
getInputTo(constData).erase(inputTo.first);
}
}
}
if (constData->getInputTo().empty()) {
if (getInputTo(constData).empty()) {
layer_to_remove.push_back(layer);
data_to_remove.push_back(constData);
}
@ -476,7 +476,7 @@ void ConstTransformer::cleanup() {
// Subgraph case
auto &const_holder = inputs.back();
if (const_holder->getPrecision() == Precision::UNSPECIFIED) {
auto &holder_map = const_holder->getInputTo();
auto &holder_map = getInputTo(const_holder);
// Remove from const holder data object
for (const auto &layer : layer_to_remove) {
auto self_found = std::find_if(holder_map.begin(), holder_map.end(),

View File

@ -169,7 +169,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
for (const auto& param : parameters) {
auto info = in_info_map_with_parameters.at(param->get_friendly_name());
auto data_ptr = info->getInputData();
auto input_to = data_ptr->getInputTo();
auto input_to = getInputTo(data_ptr);
for (const auto& next_layer : input_to) {
auto port_idx = find_input_idx(next_layer.second, data_ptr);
ngraph_parameter_id_to_ie_layer_port[counter].push_back({next_layer.first, port_idx});
@ -190,7 +190,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
// This deep copy will hold all unreachable constants. See the comment in CopyTIBody function.
auto deep_cp_body = InferenceEngine::NetPass::CopyTIBody(temp_body);
for (const auto& data_ptr : deep_cp_body.inputs) {
auto input_to = data_ptr->getInputTo();
auto input_to = getInputTo(data_ptr);
for (const auto& node : input_to) {
// Make it compatible with ir v7: delete Input layers in body
if (node.second->type != "Input") {
@ -212,7 +212,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
for (const auto& input_layer : body_input_layers) {
// Save all constants to the holder so that they are not deleted.
if (input_layer->insData.empty()) {
holder->getInputTo()[input_layer->name] = input_layer;
getInputTo(holder)[input_layer->name] = input_layer;
continue;
}
@ -225,7 +225,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
DataPtr data(new Data(data_name, layer_name_to_tensor_desc[input_layer->name][i]));
input_layer->insData[i] = data;
data->getInputTo()[input_layer->name] = input_layer;
getInputTo(data)[input_layer->name] = input_layer;
in_info_map[data_name] = data;
}
}
@ -242,7 +242,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
res->body.inputs.emplace_back(in.second);
// Fill the map to get the input index by layer and port of the body.
auto input_to = in.second->getInputTo();
auto input_to = getInputTo(in.second);
for (const auto& next_layer : input_to) {
auto port_idx = find_input_idx(next_layer.second, in.second);
ie_layer_port_to_tensor_iterator_input_id[{next_layer.first, port_idx}] = counter;

View File

@ -1431,7 +1431,7 @@ void SpaceToBatchValidator::parseParams(CNNLayer* layer) {
if (dataPtr->getTensorDesc().getPrecision() != Precision::I32
&& dataPtr->getTensorDesc().getPrecision() != Precision::I64)
THROW_IE_EXCEPTION << "'" << layerName << "' layer has invalid input precision";
auto creator = dataPtr->getCreatorLayer().lock();
auto creator = getCreatorLayer(dataPtr).lock();
if (creator == nullptr)
THROW_IE_EXCEPTION << "'" << layerName << "' layer has nullable input layer";
@ -1497,7 +1497,7 @@ void BatchToSpaceValidator::parseParams(CNNLayer* layer) {
if (dataPtr->getTensorDesc().getPrecision() != Precision::I32
&& dataPtr->getTensorDesc().getPrecision() != Precision::I64)
THROW_IE_EXCEPTION << "'" << layerName << "' layer has invalid input precision";
auto creator = dataPtr->getCreatorLayer().lock();
auto creator = getCreatorLayer(dataPtr).lock();
if (creator == nullptr)
THROW_IE_EXCEPTION << "'" << layerName << "' layer has nullable input layer";

View File

@ -34,8 +34,8 @@ using namespace details;
DataPtr cloneData(const InferenceEngine::Data& source) {
auto cloned = std::make_shared<InferenceEngine::Data>(source);
if (cloned != nullptr) {
cloned->getCreatorLayer().reset();
cloned->getInputTo().clear();
getCreatorLayer(cloned).reset();
getInputTo(cloned).clear();
}
return cloned;
}
@ -269,22 +269,22 @@ details::CNNNetworkImplPtr cloneNet(const std::vector<CNNLayerPtr>& layers) {
string inputName;
// Find input name
for (auto&& inp : data->getInputTo()) {
for (auto&& inp : getInputTo(data)) {
if (srcLayer == inp.second) {
inputName = inp.first;
break;
}
}
assert(!inputName.empty());
clonedData->getInputTo().insert({inputName, clonedLayer});
getInputTo(clonedData).insert({inputName, clonedLayer});
clonedLayer->insData.push_back(clonedData);
}
for (auto&& data : srcLayer->outData) {
auto clonedData = createDataImpl(data);
clonedData->getCreatorLayer() = clonedLayer;
getCreatorLayer(clonedData) = clonedLayer;
clonedLayer->outData.push_back(clonedData);
for (auto&& inp : data->getInputTo()) {
for (auto&& inp : getInputTo(data)) {
auto layer = inp.second;
// TODO(amalyshe) is it the best place to check priorbox and remove
// such edge from outputs?
@ -299,7 +299,7 @@ details::CNNNetworkImplPtr cloneNet(const std::vector<CNNLayerPtr>& layers) {
}
for (auto&& data : clonedDatas) {
auto layer = data->getCreatorLayer().lock();
auto layer = getCreatorLayer(data).lock();
// create an artificial input layer because the logic in some algorithms relies
// on the existence of these layers in the network
if (nullptr == layer) {
@ -307,13 +307,13 @@ details::CNNNetworkImplPtr cloneNet(const std::vector<CNNLayerPtr>& layers) {
auto originalData = clonedDataMap[data];
assert(nullptr != originalData);
if (auto originalLayer = originalData->getCreatorLayer().lock()) {
if (auto originalLayer = getCreatorLayer(originalData).lock()) {
if (CaselessEq<string>()(originalLayer->type, "input") ||
CaselessEq<string>()(originalLayer->type, "const") ||
CaselessEq<string>()(originalLayer->type, "memory")) {
layer = cloneLayerImpl(*originalLayer);
layer->outData.push_back(data);
data->getCreatorLayer() = layer;
getCreatorLayer(data) = layer;
}
}
@ -322,7 +322,7 @@ details::CNNNetworkImplPtr cloneNet(const std::vector<CNNLayerPtr>& layers) {
layer = std::make_shared<CNNLayer>(params);
// this place should be transactional
layer->outData.push_back(data);
data->getCreatorLayer() = layer;
getCreatorLayer(data) = layer;
net->addLayer(layer);
}
}
@ -526,8 +526,8 @@ struct NodePrinter {
ss << data->getTensorDesc().getLayout();
printed_properties.emplace_back("layout", ss.str());
printed_properties.emplace_back("name", data->getName());
if (data->getCreatorLayer().lock() != nullptr)
printed_properties.emplace_back("creator layer", data->getCreatorLayer().lock()->name);
if (getCreatorLayer(data).lock() != nullptr)
printed_properties.emplace_back("creator layer", getCreatorLayer(data).lock()->name);
printNode(node_name, data->getName(), node_properties, printed_properties);
}

View File

@ -46,7 +46,7 @@ static std::vector<DataPtr> getAllInputs(const std::vector<DataPtr>& heads) {
// Define all start layers
for (const auto& data : heads) {
auto& secondLayers = data->getInputTo();
auto& secondLayers = getInputTo(data);
if (secondLayers.empty()) continue;
@ -65,7 +65,7 @@ static std::vector<DataPtr> getAllInputs(const std::vector<DataPtr>& heads) {
// layers from head (like const placeholders)
for (auto& starter : inputLayers) {
DataPtr holder(new Data(starter->name + ":input_holder", starter->precision));
holder->getInputTo()[starter->name] = starter;
getInputTo(holder)[starter->name] = starter;
res.push_back(holder);
}
@ -102,7 +102,7 @@ TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string su
std::unordered_map<Data*, DataPtr> old2new_d;
for (auto& in : body.inputs) {
auto new_data = std::make_shared<Data>(*in.get());
for (auto& to : new_data->getInputTo()) to.second = old2new_l[to.second.get()];
for (auto& to : getInputTo(new_data)) to.second = old2new_l[to.second.get()];
old2new_d[in.get()] = new_data;
}
@ -113,10 +113,10 @@ TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string su
for (int i = 0; i < old->outData.size(); i++) {
auto old_data = old->outData[i];
auto new_data = new_one->outData[i];
new_data->getCreatorLayer() = CNNLayerWeakPtr(new_one);
getCreatorLayer(new_data) = CNNLayerWeakPtr(new_one);
old2new_d[old_data.get()] = new_data;
for (auto& to : new_data->getInputTo()) to.second = old2new_l[to.second.get()];
for (auto& to : getInputTo(new_data)) to.second = old2new_l[to.second.get()];
}
// remap input data
for (int i = 0; i < old->insData.size(); i++) {
@ -133,8 +133,8 @@ TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string su
auto old_name = layer->name;
layer->name += suffix;
for (auto& ins : layer->insData) {
ins.lock()->getInputTo().erase(old_name);
ins.lock()->getInputTo()[layer->name] = layer;
getInputTo(ins.lock()).erase(old_name);
getInputTo(ins.lock())[layer->name] = layer;
}
}
for (auto& kvp : old2new_d) kvp.second->setName(kvp.second->getName() + suffix);
@ -160,7 +160,7 @@ TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string su
for (auto &in : res.inputs) {
// fake holder Data should have UNSPECIFIED precision
if (in->getPrecision() == Precision::UNSPECIFIED) {
for (const auto &kvp : in->getInputTo()) {
for (const auto &kvp : getInputTo(in)) {
already_on_hold.emplace(kvp.second);
}
}
@ -181,7 +181,7 @@ TensorIterator::Body CopyTIBody(const TensorIterator::Body& body, std::string su
auto holder = res.inputs.back();
for (auto layer : to_hold) {
holder->getInputTo()[layer->name] = layer;
getInputTo(holder)[layer->name] = layer;
}
}
return res;
@ -263,14 +263,14 @@ static RuleClassSet classifyOutputRules(const TensorIterator& ti) {
* @param slave
*/
void CombineData(DataPtr& master, DataPtr& slave) {
for (auto& kvp : slave->getInputTo()) {
for (auto& kvp : getInputTo(slave)) {
auto& slave_layer = kvp.second;
for (auto& slv_ins_wptr : slave_layer->insData) {
auto slv_ins = slv_ins_wptr.lock();
// Replace slave ptr with master
if (slv_ins == slave) slv_ins_wptr = master;
}
master->getInputTo()[slave_layer->name] = slave_layer;
getInputTo(master)[slave_layer->name] = slave_layer;
}
}
@ -284,7 +284,7 @@ void CombineData(DataPtr& master, DataPtr& slave) {
template <typename NET>
void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataPtr out_data, NET &net) {
// TODO: update outputs of the network if out_data was output
if (out_data->getInputTo().empty()) {
if (getInputTo(out_data).empty()) {
auto data_name = out_data->getName();
in_data->setName(data_name);
}
@ -295,13 +295,13 @@ void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataP
* NET = ICNNNetwork
*/
void SaveOutputDataName(InferenceEngine::DataPtr in_data, InferenceEngine::DataPtr out_data, ICNNNetwork& net) {
if (out_data->getInputTo().empty()) {
if (getInputTo(out_data).empty()) {
InferenceEngine::OutputsDataMap outputs_data_map;
net.getOutputsInfo(outputs_data_map);
auto out_data_name = out_data->getName();
in_data->setName(out_data_name);
if (outputs_data_map.count(out_data_name)) {
auto parent_layer_ptr = in_data->getCreatorLayer().lock();
auto parent_layer_ptr = getCreatorLayer(in_data).lock();
IE_ASSERT(parent_layer_ptr != nullptr);
auto parent_layer_name = parent_layer_ptr->name;
size_t in_data_out_index = 0;
@ -332,7 +332,7 @@ void RemoveLayer(CNNLayerPtr& layer, NET &net) {
auto out_data = layer->outData[0];
IE_ASSERT(in_data->getTensorDesc() == out_data->getTensorDesc());
auto &input_to_map = in_data->getInputTo();
auto &input_to_map = getInputTo(in_data);
auto self_found = std::find_if(input_to_map.begin(), input_to_map.end(),
[&layer] (const std::pair<std::string, CNNLayerPtr> &kvp) {
return kvp.second == layer;
@ -396,8 +396,8 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
IE_ASSERT(cell->insData.size() == NS + 1); // {data, state1, [state2]}
IE_ASSERT(cell->outData.size() == NS); // {state1, [state2]}
if (cell->insData[0].lock()->getCreatorLayer().lock() != rsp1 ||
cell->outData[0]->getInputTo().begin()->second != rsp2)
if (getCreatorLayer(cell->insData[0].lock()).lock() != rsp1 ||
getInputTo(cell->outData[0]).begin()->second != rsp2)
return false;
// Check port mapping
@ -478,13 +478,13 @@ bool convertToRNNSeq(CNNLayerPtr cur, const N& net) {
for (int i : i_order) {
auto in_data = ti->insData[i].lock();
in_data->getInputTo().erase(ti->name);
in_data->getInputTo()[rnn->name] = rnn;
getInputTo(in_data).erase(ti->name);
getInputTo(in_data)[rnn->name] = rnn;
rnn->insData.push_back(in_data);
}
for (int i : o_order) {
rnn->outData.push_back(ti->outData[i]);
rnn->outData.back()->getCreatorLayer() = rnn;
getCreatorLayer(rnn->outData.back()) = rnn;
}
return true;
@ -512,7 +512,7 @@ bool unrollTI(CNNLayerPtr cur, ICNNNetwork& net) {
auto holder = body_list[i].inputs.back();
if (holder->getPrecision() == Precision::UNSPECIFIED) {
for (auto kvp : holder->getInputTo()) {
for (auto kvp : getInputTo(holder)) {
if (inet) inet->addLayer(kvp.second);
else ngraphnet->addLayer(kvp.second);
}
@ -523,8 +523,8 @@ bool unrollTI(CNNLayerPtr cur, ICNNNetwork& net) {
std::tie(first_class, second_class, third_class) = classifyInputRules(*ti);
/** Clean links on TI */
for (auto& ins : ti->insData) ins.lock()->getInputTo().erase(ti->name);
for (auto& outs : ti->outData) outs->getCreatorLayer().reset();
for (auto& ins : ti->insData) getInputTo(ins.lock()).erase(ti->name);
for (auto& outs : ti->outData) getCreatorLayer(outs).reset();
/** FIRST class comes */
for (int i = 0; i < first_class.size(); i++) {
@ -536,12 +536,12 @@ bool unrollTI(CNNLayerPtr cur, ICNNNetwork& net) {
split->_axis = rule.axis;
split->outData.resize(num);
split->insData.emplace_back(in_data);
in_data->getInputTo()[split->name] = split;
getInputTo(in_data)[split->name] = split;
for (int j = 0; j < num; j++) {
auto body_idx = rule.stride == 1 ? j : num - 1 - j;
auto& chunk = body_list[body_idx].inputs[rule.to];
chunk->getCreatorLayer() = split;
getCreatorLayer(chunk) = split;
split->outData[j] = chunk;
}
}
@ -586,12 +586,12 @@ bool unrollTI(CNNLayerPtr cur, ICNNNetwork& net) {
concat->_axis = rule.axis;
concat->insData.resize(num);
concat->outData.emplace_back(out_data);
out_data->getCreatorLayer() = concat;
getCreatorLayer(out_data) = concat;
for (int j = 0; j < num; j++) {
auto body_idx = rule.stride == 1 ? j : num - 1 - j;
auto& chunk = body_list[body_idx].outputs[rule.to];
chunk->getInputTo()[concat->name] = concat;
getInputTo(chunk)[concat->name] = concat;
concat->insData[j] = chunk;
}
}
@ -612,9 +612,9 @@ bool unrollTI(CNNLayerPtr cur, ICNNNetwork& net) {
auto& from_data = ti->outData[rule.from];
auto& to_data = body_list[num - 1].outputs[rule.to];
auto parent = to_data->getCreatorLayer().lock();
auto parent = getCreatorLayer(to_data).lock();
std::replace(parent->outData.begin(), parent->outData.end(), to_data, from_data);
from_data->getCreatorLayer() = parent;
getCreatorLayer(from_data) = parent;
CombineData(from_data, to_data);
}
@ -633,7 +633,7 @@ static CNNLayerPtr _concat(std::string name, Precision prc, SizeVector dims, int
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -650,7 +650,7 @@ static CNNLayerPtr _split(std::string name, Precision prc, SizeVector dims, int
for (int i = 0; i < num; i++) {
auto out_data = DataPtr(
new Data(name + "_part_" + std::to_string(i), TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[i] = out_data;
}
@ -671,7 +671,7 @@ static CNNLayerPtr _fc(std::string name, Precision prc, SizeVector dims, Blob::P
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -686,7 +686,7 @@ static std::shared_ptr<ClampLayer> _act(std::string name, Precision prc, SizeVec
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -706,7 +706,7 @@ static CNNLayerPtr _pwr(std::string name, Precision prc, SizeVector dims, float
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -722,7 +722,7 @@ static CNNLayerPtr _eltw(std::string name, Precision prc, SizeVector dims, std::
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -735,7 +735,7 @@ static std::shared_ptr<ReshapeLayer> _resh(std::string name, Precision prc, Size
res->outData.resize(1);
auto out_data = DataPtr(new Data(name, TensorDesc {prc, dims, TensorDesc::getLayoutByDims(dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
return res;
@ -765,13 +765,13 @@ static std::shared_ptr<RNNCellBase> _cell(std::string name, Precision prc, SizeV
auto out_data =
DataPtr(new Data(name + ":out_data", TensorDesc {prc, data_dims, TensorDesc::getLayoutByDims(data_dims)}));
out_data->getCreatorLayer() = res;
getCreatorLayer(out_data) = res;
res->outData[0] = out_data;
for (size_t i = 0; i < NS; i++) {
auto out_state = DataPtr(new Data(name + ":out_state_" + std::to_string(i),
TensorDesc {prc, state_dims, TensorDesc::getLayoutByDims(state_dims)}));
out_state->getCreatorLayer() = res;
getCreatorLayer(out_state) = res;
res->outData[i] = out_state;
}
@ -789,12 +789,12 @@ static std::shared_ptr<TensorIterator> _ti(std::string name, Precision prc, size
static void _link(CNNLayerPtr src, CNNLayerPtr dst, size_t src_port = 0, size_t dst_port = 0) {
auto data = src->outData[src_port];
data->getInputTo()[dst->name] = dst;
getInputTo(data)[dst->name] = dst;
dst->insData[dst_port] = data;
}
static void _link(DataPtr& data, CNNLayerPtr dst, size_t dst_port = 0) {
data->getInputTo()[dst->name] = dst;
getInputTo(data)[dst->name] = dst;
dst->insData[dst_port] = data;
}
@ -886,8 +886,8 @@ static bool unrollRNNCellBody(CNNLayerPtr cur) {
auto prc = cell->precision;
/** Release links on TI */
for (auto& ins : cell->insData) ins.lock()->getInputTo().erase(cell->name);
for (auto& outs : cell->outData) outs->getCreatorLayer().reset();
for (auto& ins : cell->insData) getInputTo(ins.lock()).erase(cell->name);
for (auto& outs : cell->outData) getCreatorLayer(outs).reset();
// operations
auto concat = _concat(name + ":concat", prc, {N, D + S}, 2);
@ -902,7 +902,7 @@ static bool unrollRNNCellBody(CNNLayerPtr cur) {
// Output
act->outData[0] = out_h_state;
out_h_state->getCreatorLayer() = act;
getCreatorLayer(out_h_state) = act;
return true;
}
@ -932,8 +932,8 @@ static bool unrollLSTMCellBody(CNNLayerPtr cur) {
auto prc = cell->precision;
/** Release links on TI */
for (auto& ins : cell->insData) ins.lock()->getInputTo().erase(cell->name);
for (auto& outs : cell->outData) outs->getCreatorLayer().reset();
for (auto& ins : cell->insData) getInputTo(ins.lock()).erase(cell->name);
for (auto& outs : cell->outData) getCreatorLayer(outs).reset();
// operations
auto concat = _concat(name + ":concat", prc, {N, D + S}, 2);
@ -981,11 +981,11 @@ static bool unrollLSTMCellBody(CNNLayerPtr cur) {
// Output
mul->outData[0] = out_h_state;
out_h_state->getCreatorLayer() = mul;
getCreatorLayer(out_h_state) = mul;
CombineData(out_c_state, sum->outData[0]);
sum->outData[0] = out_c_state;
out_c_state->getCreatorLayer() = sum;
getCreatorLayer(out_c_state) = sum;
return true;
}
@ -1022,8 +1022,8 @@ static bool unrollGRUCellBody(CNNLayerPtr cur, bool linear_before_reset = false)
auto prc = cell->precision;
/** Release links on TI */
for (auto& ins : cell->insData) ins.lock()->getInputTo().erase(cell->name);
for (auto& outs : cell->outData) outs->getCreatorLayer().reset();
for (auto& ins : cell->insData) getInputTo(ins.lock()).erase(cell->name);
for (auto& outs : cell->outData) getCreatorLayer(outs).reset();
// operations
auto concat = _concat(name + ":concat", prc, {N, D + S}, 2);
@ -1097,7 +1097,7 @@ static bool unrollGRUCellBody(CNNLayerPtr cur, bool linear_before_reset = false)
// Output
sum->outData[0] = out_h_state;
out_h_state->getCreatorLayer() = sum;
getCreatorLayer(out_h_state) = sum;
return true;
}
@ -1138,8 +1138,8 @@ static bool unrollSeq(CNNLayerPtr cur) {
const auto prc = seq->precision;
/** Release links on Seq */
for (auto& ins : seq->insData) ins.lock()->getInputTo().erase(seq->name);
for (auto& outs : seq->outData) outs->getCreatorLayer().reset();
for (auto& ins : seq->insData) getInputTo(ins.lock()).erase(seq->name);
for (auto& outs : seq->outData) getCreatorLayer(outs).reset();
/** Body subgraph*/
auto in_d_body_dims = in_d_dims;
@ -1182,7 +1182,7 @@ static bool unrollSeq(CNNLayerPtr cur) {
_link(in_data, ti, 0);
ti->outData[0] = out_data;
out_data->getCreatorLayer() = ti;
getCreatorLayer(out_data) = ti;
ti->body.inputs.push_back(body_in_data);
ti->body.outputs.push_back(resh2->outData[0]);
@ -1199,7 +1199,7 @@ static bool unrollSeq(CNNLayerPtr cur) {
auto out_state = seq->outData[1 + i];
ti->outData[1 + i] = out_state;
out_state->getCreatorLayer() = ti;
getCreatorLayer(out_state) = ti;
auto body_in_state = DataPtr(new Data(name + ":state_in_" + std::to_string(i),
TensorDesc {prc, state_dims, TensorDesc::getLayoutByDims(state_dims)}));

View File

@ -13,6 +13,7 @@
#include <unordered_set>
#include <sstream>
#include "ie_layers.h"
#include "details/caseless.hpp"
#include "details/ie_cnn_network_tools.h"
#include "exec_graph_info.hpp"
@ -313,7 +314,7 @@ std::vector<CNNLayerPtr> TopologicalSort(const ICNNNetwork& network) {
auto get_consumers = [](const CNNLayerPtr& node) -> std::vector<CNNLayerPtr> {
std::vector<CNNLayerPtr> consumers;
for (const auto & output : node->outData) {
for (const auto &consumer : output->getInputTo()) {
for (const auto &consumer : getInputTo(output)) {
consumers.push_back(consumer.second);
}
}
@ -336,7 +337,7 @@ std::vector<CNNLayerPtr> TopologicalSort(const ICNNNetwork& network) {
if (!locked_input) {
THROW_IE_EXCEPTION << "insData for " << node->name << " is not valid.";
}
if (auto next_node = locked_input->getCreatorLayer().lock()) {
if (auto next_node = getCreatorLayer(locked_input).lock()) {
if (!used.count(next_node->name)) {
// Check that all consumers were used
bool all_consumers_used(true);
@ -364,14 +365,14 @@ std::vector<CNNLayerPtr> TopologicalSort(const ICNNNetwork& network) {
// First we run BFS starting from outputs, which provides a deterministic graph traversal
for (const auto & output : outputs) {
if (!used.count(output.first)) {
bfs(output.second->getCreatorLayer().lock());
bfs(getCreatorLayer(output.second).lock());
}
}
// For cases when the graph has no outputs we start BFS from inputs to ensure a topological sort
for (const auto & input : inputs) {
const auto data_ptr = input.second->getInputData();
for (const auto & consumer : data_ptr->getInputTo())
for (const auto & consumer : getInputTo(data_ptr))
if (!used.count(consumer.first)) {
bfs(consumer.second, true);
}
@ -597,7 +598,7 @@ std::size_t FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_do
}
for (size_t oport = 0; oport < node->outData.size(); oport++) {
const DataPtr outData = node->outData[oport];
for (const auto& inputTo : outData->getInputTo()) {
for (const auto& inputTo : getInputTo(outData)) {
for (int iport = 0; iport < inputTo.second->insData.size(); iport++) {
if (inputTo.second->insData[iport].lock() == outData) {
auto itTo = matching.find(inputTo.second);

View File

@ -139,7 +139,7 @@ void OutputController::propagateShapes(const std::set<ReshapeLauncher::Ptr>& lau
checkCorrespondence();
unsigned idx = 0;
for (auto const& outData : _dataVec) {
for (auto const& inputTo : outData->getInputTo()) {
for (auto const& inputTo : getInputTo(outData)) {
CNNLayerPtr layer = inputTo.second;
if (layer == nullptr) {
THROW_IE_EXCEPTION << "Failed to propagate shapes for layer (" << inputTo.first
@ -162,7 +162,7 @@ void OutputController::propagateShapes(const std::set<ReshapeLauncher::Ptr>& lau
void OutputController::propagateBlobs(const std::set<ReshapeLauncher::Ptr>& launchers) {
unsigned idx = 0;
for (auto const& outData : _dataVec) {
for (auto const& inputTo : outData->getInputTo()) {
for (auto const& inputTo : getInputTo(outData)) {
CNNLayerPtr layer = inputTo.second;
if (layer == nullptr) {
THROW_IE_EXCEPTION << "Failed to propagate shapes for layer (" << inputTo.first

View File

@ -45,7 +45,7 @@ Reshaper::Reshaper(std::vector<DataPtr> insDatas, const LauncherCreator::Ptr& la
_allSortedLayers = SortTopologicallyStartsFrom(insDatas);
for (auto& in_data : insDatas) {
for (auto layer : in_data->getInputTo()) {
for (auto layer : getInputTo(in_data)) {
_inputLayers.insert(layer.second);
}
}
@ -225,7 +225,7 @@ StatusCode Reshaper::apply(ResponseDesc* resp) {
}
SizeVector Reshaper::getResultShapeFor(DataPtr& data, ResponseDesc* resp) {
auto creator_layer = data->getCreatorLayer().lock();
auto creator_layer = getCreatorLayer(data).lock();
std::string creator_layer_name;
if (creator_layer) {
creator_layer_name = creator_layer->name;

View File

@ -10,6 +10,7 @@
#include <vector>
#include <unordered_set>
#include "ie_layers.h"
#include "cnn_network_impl.hpp"
#include "low_precision_transformations/common/dequantization_details.hpp"
@ -222,7 +223,7 @@ private:
}
IE_SUPPRESS_DEPRECATED_START
const CNNLayerPtr blobLayer = data->getCreatorLayer().lock();
const CNNLayerPtr blobLayer = getCreatorLayer(data).lock();
if (blobLayer == nullptr) {
THROW_IE_EXCEPTION << "parent layer is absent for " << quantize.type << " layer " << quantize.name;
}

View File

@ -164,7 +164,7 @@ void EltwiseTransformation::transform(TransformationContext& context, CNNLayer&
if (emptyPathData == nullptr) {
THROW_IE_LPT_EXCEPTION(eltwise) << "data for empty path is absent";
}
const CNNLayerPtr emptyPathDequantizationLayer = emptyPathData->getCreatorLayer().lock();
const CNNLayerPtr emptyPathDequantizationLayer = getCreatorLayer(emptyPathData).lock();
{
fillFromDequantizationLayer(*emptyPathDequantizationLayer, emptyPathDequantizationScales, emptyPathDequantizationShifts);
@ -181,7 +181,7 @@ void EltwiseTransformation::transform(TransformationContext& context, CNNLayer&
if (fullPathData == nullptr) {
THROW_IE_LPT_EXCEPTION(eltwise) << "data for full path is absent";
}
const CNNLayerPtr fullPathDequantizationLayer = fullPathData->getCreatorLayer().lock();
const CNNLayerPtr fullPathDequantizationLayer = getCreatorLayer(fullPathData).lock();
std::vector<float> fullPathDequantizationScales;
std::vector<float> fullPathDequantizationShifts;
fillFromDequantizationLayer(*fullPathDequantizationLayer, fullPathDequantizationScales, fullPathDequantizationShifts);
@ -238,10 +238,10 @@ bool isBranchWithTargetType(const CNNLayer& fakeQuantize, const std::string& typ
return false;
}
if ((fakeQuantize.outData.size() == 1) && (fakeQuantize.outData[0]->getInputTo().size() == 1)) {
if ((fakeQuantize.outData.size() == 1) && (getInputTo(fakeQuantize.outData[0]).size() == 1)) {
const CNNLayerPtr parentOnActivation = CNNNetworkHelper::getParent(fakeQuantize, 0);
if ((parentOnActivation != nullptr) && CaselessEq<std::string>()(parentOnActivation->type, type) &&
(parentOnActivation->outData.size() == 1) && (parentOnActivation->outData[0]->getInputTo().size() == 1)) {
(parentOnActivation->outData.size() == 1) && (getInputTo(parentOnActivation->outData[0]).size() == 1)) {
return true;
}
}
@ -293,7 +293,7 @@ int EltwiseTransformation::getNotEmpty(const CNNLayer& eltwise) {
continue;
}
if (parents[i]->outData[0]->getInputTo().size() == 1) {
if (getInputTo(parents[i]->outData[0]).size() == 1) {
return i;
}
}

View File

@ -324,7 +324,7 @@ Precision LayerTransformation::getPrecisionParent(const CNNLayer& layer) {
}
for (const DataPtr outData : parent->outData) {
const auto inputTo = outData->getInputTo();
const auto inputTo = getInputTo(outData);
for (auto it = inputTo.begin(); it != inputTo.end(); ++it) {
if (it->second->name == layer.name) {
return outData->getPrecision();

View File

@ -359,7 +359,7 @@ void CNNNetworkHelper::updateBlobs(const CNNLayer& quantizeLayer, int constLayer
THROW_IE_EXCEPTION << "data is absent";
}
CNNLayerPtr blobLayer = inData->getCreatorLayer().lock();
CNNLayerPtr blobLayer = getCreatorLayer(inData).lock();
if (blobLayer == nullptr) {
THROW_IE_EXCEPTION << "layer is absent";
}
@ -426,7 +426,7 @@ size_t CNNNetworkHelper::getIndex(const CNNLayer& layer) {
if (insData == nullptr) {
continue;
}
const CNNLayerPtr parent = insData->getCreatorLayer().lock();
const CNNLayerPtr parent = getCreatorLayer(insData).lock();
if ((parent != nullptr) && (parent->name == layer.name)) {
return i;
}
@ -447,7 +447,7 @@ std::vector<CNNLayerPtr> CNNNetworkHelper::transformFakeQuantizeToConst(Transfor
if (insData == nullptr) {
THROW_IE_EXCEPTION << "input data for FakeQuantize '" << fakeQuantize->name << "' is nullable";
}
const CNNLayerPtr parent = insData->getCreatorLayer().lock();
const CNNLayerPtr parent = getCreatorLayer(insData).lock();
if (parent == nullptr) {
THROW_IE_EXCEPTION << "input layer for FakeQuantize '" << fakeQuantize->name << "' is nullable";
}
@ -474,7 +474,7 @@ std::vector<CNNLayerPtr> CNNNetworkHelper::transformFakeQuantizeToConst(Transfor
}
// const Precision precision = outData->getPrecision();
const auto inputTo = outData->getInputTo();
const auto inputTo = getInputTo(outData);
std::vector<CNNLayerPtr> constLayers;
for (auto it : inputTo) {
const CNNLayerPtr child = it.second;
@ -783,7 +783,7 @@ CNNLayerPtr CNNNetworkHelper::getParent(const CNNLayer& layer, const size_t inde
CNNLayerPtr inputLayer;
do {
inputLayer = inputLayerData->getCreatorLayer().lock();
inputLayer = getCreatorLayer(inputLayerData).lock();
if (!inputLayer) {
THROW_IE_EXCEPTION << "input is absent";
}
@ -818,7 +818,7 @@ std::vector<CNNLayerPtr> CNNNetworkHelper::getParents(const CNNLayer& layer, con
THROW_IE_EXCEPTION << "input data is absent";
}
CNNLayerPtr parent = insData->getCreatorLayer().lock();
CNNLayerPtr parent = getCreatorLayer(insData).lock();
if (parent == nullptr) {
THROW_IE_EXCEPTION << "input layer is absent";
}
@ -844,7 +844,7 @@ std::vector<CNNLayerPtr> CNNNetworkHelper::getParentsRecursivelyExceptTypes(
THROW_IE_EXCEPTION << "input data is absent";
}
CNNLayerWeakPtr parentWeak = insData->getCreatorLayer();
CNNLayerWeakPtr parentWeak = getCreatorLayer(insData);
if (parentWeak.expired()) {
continue;
}
@ -941,8 +941,8 @@ CNNLayerPtr CNNNetworkHelper::addLayer(
int l1_out_i = 0;
if (child != nullptr) {
for (; l1_out_i < parent->outData.size(); l1_out_i++) {
if (parent->outData[l1_out_i]->getInputTo().find(child->name) !=
parent->outData[l1_out_i]->getInputTo().end()) {
if (getInputTo(parent->outData[l1_out_i]).find(child->name) !=
getInputTo(parent->outData[l1_out_i]).end()) {
break;
}
}
@ -981,7 +981,7 @@ void CNNNetworkHelper::replaceLayer(TransformationContext& context, const CNNLay
for (CNNLayerPtr parent : parents) {
for (size_t outDataIndex = 0ul; outDataIndex < parent->outData.size(); ++outDataIndex) {
const DataPtr outData = parent->outData[outDataIndex];
std::map<std::string, CNNLayerPtr>& inputTo = outData->getInputTo();
std::map<std::string, CNNLayerPtr>& inputTo = getInputTo(outData);
inputTo[source->name] = target;
target->insData.push_back(outData);
}
@ -995,14 +995,14 @@ void CNNNetworkHelper::replaceLayer(TransformationContext& context, const CNNLay
networkImpl->removeData(outData->getName());
DataPtr newOutData(new Data(outData->getName(), outData->getTensorDesc()));
newOutData->getCreatorLayer() = target;
getCreatorLayer(newOutData) = target;
target->outData[outDataIndex] = newOutData;
networkImpl->addData(newOutData->getName().c_str(), newOutData);
std::map<std::string, CNNLayerPtr> inputTo = outData->getInputTo();
std::map<std::string, CNNLayerPtr> inputTo = getInputTo(outData);
for (const auto it : inputTo) {
const CNNLayerPtr child = it.second;
newOutData->getInputTo().emplace(it.first, child);
getInputTo(newOutData).emplace(it.first, child);
for (const CNNLayerPtr& child : children) {
for (size_t insDataIndex = 0ul; insDataIndex < child->insData.size(); ++insDataIndex) {
@ -1011,7 +1011,7 @@ void CNNNetworkHelper::replaceLayer(TransformationContext& context, const CNNLay
THROW_IE_LPT_EXCEPTION(*child) << "insert data " << insDataIndex << " is absent";
}
const CNNLayerPtr parent = insData->getCreatorLayer().lock();
const CNNLayerPtr parent = getCreatorLayer(insData).lock();
if (parent == nullptr) {
THROW_IE_LPT_EXCEPTION(*child) << "parent layer for insert data " << insDataIndex << " is absent";
}
@ -1022,7 +1022,7 @@ void CNNNetworkHelper::replaceLayer(TransformationContext& context, const CNNLay
}
}
}
outData->getInputTo().clear();
getInputTo(outData).clear();
}
networkImpl->addLayer(target);
@ -1064,8 +1064,8 @@ CNNLayerPtr CNNNetworkHelper::addScaleShiftBetween(TransformationContext& contex
int l1_out_i = 0;
if (child != nullptr) {
for (; l1_out_i < parent->outData.size(); l1_out_i++) {
if (parent->outData[l1_out_i]->getInputTo().find(child->name) !=
parent->outData[l1_out_i]->getInputTo().end()) {
if (getInputTo(parent->outData[l1_out_i]).find(child->name) !=
getInputTo(parent->outData[l1_out_i]).end()) {
break;
}
}
@ -1127,8 +1127,8 @@ CNNLayerPtr CNNNetworkHelper::addConstBetween(ICNNNetwork& net, const CNNLayerPt
int l1_out_i = 0;
if (layer2 != nullptr) {
for (; l1_out_i < layer1->outData.size(); l1_out_i++) {
if (layer1->outData[l1_out_i]->getInputTo().find(layer2->name) !=
layer1->outData[l1_out_i]->getInputTo().end()) {
if (getInputTo(layer1->outData[l1_out_i]).find(layer2->name) !=
getInputTo(layer1->outData[l1_out_i]).end()) {
break;
}
}
@ -1168,7 +1168,7 @@ void CNNNetworkHelper::addLayerToCNNNetworkAfterData(
}
if (layer && (nextLayerName.empty() || (parentOutData == nullptr) ||
(parentOutData->getInputTo().find(nextLayerName) != parentOutData->getInputTo().end()))) {
(getInputTo(parentOutData).find(nextLayerName) != getInputTo(parentOutData).end()))) {
auto getTensorDesc = [](CNNLayerPtr& nextLayer) {
const DataPtr insData = nextLayer->insData[0].lock();
if (insData == nullptr) {
@ -1180,8 +1180,8 @@ void CNNNetworkHelper::addLayerToCNNNetworkAfterData(
const TensorDesc& parentTensorDesc = parentOutData != nullptr ? parentOutData->getTensorDesc() : getTensorDesc(nextLayer);
DataPtr newEdgeAfterLayer(new Data(layer->name, parentTensorDesc));
newEdgeAfterLayer->setName(layer->name);
newEdgeAfterLayer->getCreatorLayer() = layer;
newEdgeAfterLayer->getInputTo().clear();
getCreatorLayer(newEdgeAfterLayer) = layer;
getInputTo(newEdgeAfterLayer).clear();
CNNNetworkImpl* netImpl = dynamic_cast<CNNNetworkImpl*>(&net);
if (netImpl == nullptr) {
@ -1193,16 +1193,16 @@ void CNNNetworkHelper::addLayerToCNNNetworkAfterData(
IE_SUPPRESS_DEPRECATED_END
if (parentOutData != nullptr) {
parentOutData->getInputTo()[layer->name] = layer;
getInputTo(parentOutData)[layer->name] = layer;
layer->insData.push_back(parentOutData);
}
layer->outData.push_back(newEdgeAfterLayer);
if (!nextLayerName.empty()) {
// CNNLayerPtr nextLayer = parentOutData->getInputTo()[nextLayerName];
newEdgeAfterLayer->getInputTo()[nextLayerName] = nextLayer;
// CNNLayerPtr nextLayer = getInputTo(parentOutData)[nextLayerName];
getInputTo(newEdgeAfterLayer)[nextLayerName] = nextLayer;
if (parentOutData != nullptr) {
parentOutData->getInputTo().erase(nextLayerName);
getInputTo(parentOutData).erase(nextLayerName);
for (size_t i = 0; i < nextLayer->insData.size(); i++) {
if (nextLayer->insData[i].lock() == parentOutData) {
nextLayer->insData[i] = newEdgeAfterLayer;
@ -1213,7 +1213,7 @@ void CNNNetworkHelper::addLayerToCNNNetworkAfterData(
nextLayer->insData.push_back(newEdgeAfterLayer);
}
} else {
CNNLayerPtr parent = parentOutData->getCreatorLayer().lock();
CNNLayerPtr parent = getCreatorLayer(parentOutData).lock();
if (parent == nullptr) {
THROW_IE_EXCEPTION << "parent data is absent";
}
@ -1246,7 +1246,7 @@ void CNNNetworkHelper::fillInScaleShift(ScaleShiftLayer* layer, const size_t cha
std::vector<CNNLayerPtr> CNNNetworkHelper::getChildren(const CNNLayer& layer, const std::string& exceptionLayerName) {
std::vector<CNNLayerPtr> children;
for (const DataPtr outData : layer.outData) {
const std::map<std::string, CNNLayerPtr>& inputTo = outData->getInputTo();
const std::map<std::string, CNNLayerPtr>& inputTo = getInputTo(outData);
for (auto it = inputTo.begin(); it != inputTo.end(); ++it) {
CNNLayerPtr child = it->second;
if (exceptionLayerName.empty() || child->name != exceptionLayerName) {
@ -1261,7 +1261,7 @@ std::vector<CNNLayerPtr> CNNNetworkHelper::getChildrenRecursivelyExceptTypes(
const CNNLayer& layer, const std::unordered_set<std::string>& exceptionLayerTypes) {
std::vector<CNNLayerPtr> children;
for (const DataPtr outData : layer.outData) {
const std::map<std::string, CNNLayerPtr>& inputTo = outData->getInputTo();
const std::map<std::string, CNNLayerPtr>& inputTo = getInputTo(outData);
for (auto it = inputTo.begin(); it != inputTo.end(); ++it) {
CNNLayerPtr child = it->second;
if (exceptionLayerTypes.find(child->type) != exceptionLayerTypes.end()) {
@ -1327,13 +1327,13 @@ size_t CNNNetworkHelper::disconnectLayers(CNNNetworkImpl* network, const CNNLaye
bool wasFound = false;
for (auto dataIt = parentLayer->outData.begin(); dataIt != parentLayer->outData.end(); ++dataIt) {
auto data = *dataIt;
for (auto inputIt = data->getInputTo().begin(); inputIt != data->getInputTo().end(); ++inputIt) {
for (auto inputIt = getInputTo(data).begin(); inputIt != getInputTo(data).end(); ++inputIt) {
auto currentChildLayer = inputIt->second;
if (currentChildLayer == nullptr) {
THROW_IE_EXCEPTION << "Output layer for '" << parentLayer->name << "'is absent";
}
if (currentChildLayer->name == childLayer->name) {
data->getInputTo().erase(inputIt);
getInputTo(data).erase(inputIt);
wasFound = true;
break;
}
@ -1354,7 +1354,7 @@ size_t CNNNetworkHelper::disconnectLayers(CNNNetworkImpl* network, const CNNLaye
if (data == nullptr) {
THROW_IE_EXCEPTION << "Input layer data for '" << childLayer->name << "'is absent";
}
auto currentParentLayer = data->getCreatorLayer().lock();
auto currentParentLayer = getCreatorLayer(data).lock();
if (currentParentLayer == nullptr) {
THROW_IE_EXCEPTION << "Input layer for '" << childLayer->name << "'is absent";
}
@ -1377,7 +1377,7 @@ size_t CNNNetworkHelper::getInputIndex(const CNNLayerPtr& childLayer, const CNNL
if (currentParenData == nullptr) {
THROW_IE_EXCEPTION << "parent layer data is absent";
}
CNNLayerPtr currentParrentLayer = currentParenData->getCreatorLayer().lock();
CNNLayerPtr currentParrentLayer = getCreatorLayer(currentParenData).lock();
if (currentParrentLayer == nullptr) {
THROW_IE_EXCEPTION << "parent layer is absent";
}
@ -1405,7 +1405,7 @@ void CNNNetworkHelper::removeLayer(ICNNNetwork& network, const CNNLayerPtr& laye
if (data == nullptr) {
THROW_IE_EXCEPTION << "Layer's inserted data is nullptr";
}
CNNLayerPtr parentLayer = data->getCreatorLayer().lock();
CNNLayerPtr parentLayer = getCreatorLayer(data).lock();
if (parentLayer == nullptr) {
THROW_IE_EXCEPTION << "Layer's parent layer is nullptr";
}
@ -1418,7 +1418,7 @@ void CNNNetworkHelper::removeLayer(ICNNNetwork& network, const CNNLayerPtr& laye
std::vector<size_t> childrenIndexes;
if (layer->outData.size() > 0) {
childData = layer->outData[0];
auto inputTo = childData->getInputTo();
auto inputTo = getInputTo(childData);
if (inputTo.size() == 0) {
std::vector<CNNLayerPtr> parents = getParents(*layer);
if (parents.size() != 1) {
@ -1449,7 +1449,7 @@ void CNNNetworkHelper::removeLayer(ICNNNetwork& network, const CNNLayerPtr& laye
if (parentData == nullptr) {
THROW_IE_EXCEPTION << "Input data is absent";
}
parentLayer = parentData->getCreatorLayer().lock();
parentLayer = getCreatorLayer(parentData).lock();
if (parentLayer == nullptr) {
THROW_IE_EXCEPTION << "Input layer for '" << layer->name << "' is absent";
}
@ -1465,7 +1465,7 @@ void CNNNetworkHelper::removeLayer(ICNNNetwork& network, const CNNLayerPtr& laye
const size_t childInputIndex = childrenIndexes[index];
DataPtr outData = parentLayer->outData[ouputLayerOutDataIndex];
outData->getInputTo().emplace(childLayer->name, childLayer);
getInputTo(outData).emplace(childLayer->name, childLayer);
childLayer->insData.insert(childLayer->insData.begin() + childInputIndex, outData);
updateInput(networkImpl, parentLayer, outData);
@ -1616,7 +1616,7 @@ int CNNNetworkHelper::getConstParentBranchID(const CNNLayer& layer) {
THROW_IE_LPT_EXCEPTION(layer) << "invalid input data with index " << i;
}
const CNNLayerPtr parent = insData->getCreatorLayer().lock();
const CNNLayerPtr parent = getCreatorLayer(insData).lock();
if (parent == nullptr) {
THROW_IE_LPT_EXCEPTION(layer) << "parent layer is absent";
}
@ -1627,7 +1627,7 @@ int CNNNetworkHelper::getConstParentBranchID(const CNNLayer& layer) {
if (parentConstInsData == nullptr) {
THROW_IE_LPT_EXCEPTION(*parent) << "input data is absent";
}
const CNNLayerPtr parentConst = parentConstInsData->getCreatorLayer().lock();
const CNNLayerPtr parentConst = getCreatorLayer(parentConstInsData).lock();
if (parentConst == nullptr) {
THROW_IE_LPT_EXCEPTION(*parent) << "input layer is absent";
}
@ -1693,7 +1693,7 @@ Precision CNNNetworkHelper::getPrecisionParent(const CNNLayer& layer, const size
DataPtr CNNNetworkHelper::getOutData(const CNNLayer& parentLayer, const CNNLayer& childLayer) {
DataPtr parentOutData;
for (DataPtr outData : parentLayer.outData) {
const std::map<std::string, CNNLayerPtr> inputTo = outData->getInputTo();
const std::map<std::string, CNNLayerPtr> inputTo = getInputTo(outData);
for (auto childIt : inputTo) {
if (childIt.second->name == childLayer.name) {
parentOutData = outData;

View File

@ -136,7 +136,7 @@ void QuantizationDetails::getInputIntervals(
if (inputLowData == nullptr) {
THROW_IE_LPT_EXCEPTION(quantize) << "input low data is absent";
}
const CNNLayerPtr inputLowLayer = inputLowData->getCreatorLayer().lock();
const CNNLayerPtr inputLowLayer = getCreatorLayer(inputLowData).lock();
validate(inputLowLayer);
const std::vector<float> inputLowBlobValues = getBlobValue(inputLowLayer);
inputLowValues.insert(inputLowValues.end(), inputLowBlobValues.begin(), inputLowBlobValues.end());
@ -145,7 +145,7 @@ void QuantizationDetails::getInputIntervals(
if (inputHighData == nullptr) {
THROW_IE_LPT_EXCEPTION(quantize) << "input high data is absent";
}
const CNNLayerPtr inputHighLayer = inputHighData->getCreatorLayer().lock();
const CNNLayerPtr inputHighLayer = getCreatorLayer(inputHighData).lock();
validate(inputHighLayer);
const std::vector<float> inputHighBlobValues = getBlobValue(inputHighLayer);
inputHighValues.insert(inputHighValues.end(), inputHighBlobValues.begin(), inputHighBlobValues.end());
@ -170,7 +170,7 @@ void QuantizationDetails::getOutputIntervals(
if (outputLowData == nullptr) {
THROW_IE_LPT_EXCEPTION(quantize) << "output low data is absent";
}
const CNNLayerPtr outputLowLayer = outputLowData->getCreatorLayer().lock();
const CNNLayerPtr outputLowLayer = getCreatorLayer(outputLowData).lock();
validate(outputLowLayer);
const std::vector<float>& outputLowBlobValues = getBlobValue(outputLowLayer);
outputLowValues.insert(outputLowValues.end(), outputLowBlobValues.begin(), outputLowBlobValues.end());
@ -179,7 +179,7 @@ void QuantizationDetails::getOutputIntervals(
if (outputHighData == nullptr) {
THROW_IE_LPT_EXCEPTION(quantize) << "output high data is absent";
}
const CNNLayerPtr outputHighLayer = outputHighData->getCreatorLayer().lock();
const CNNLayerPtr outputHighLayer = getCreatorLayer(outputHighData).lock();
validate(outputHighLayer);
const std::vector<float> outputHighBlobValues = getBlobValue(outputHighLayer);
outputHighValues.insert(outputHighValues.end(), outputHighBlobValues.begin(), outputHighBlobValues.end());

View File

@ -51,11 +51,11 @@ void ScaleShiftToConvolutionTransformation::transform(TransformationContext& con
return;
}
if (outData->getInputTo().size() == 1ul && parents[0]->type != "Concat") {
if (getInputTo(outData).size() == 1ul && parents[0]->type != "Concat") {
return;
}
if (layer.outData[0]->getInputTo().size() == 0ul) {
if (getInputTo(layer.outData[0]).size() == 0ul) {
return;
}

View File

@ -162,7 +162,7 @@ void BF16Transformer::optimizeToFloat(InferenceEngine::CNNNetwork &network) {
DataPtr tensor = *toAnalyzeTensors.begin();
toAnalyzeTensors.erase(tensor);
// look into producer of the tensor
auto layer = tensor->getCreatorLayer().lock();
auto layer = getCreatorLayer(tensor).lock();
// if this layer is not from _initbf16 - analyze inputs
if (_initbf16.find(layer->type) == _initbf16.end()) {
// for all inputs investigate and modify tensor precision if required
@ -180,7 +180,7 @@ void BF16Transformer::optimizeToFloat(InferenceEngine::CNNNetwork &network) {
// Instead of "if they do not go _only_ to the toAnalyzeTensors" we have to apply "if they do not go at least to one of _initbf16"
// TODO: add test input1->pooling1->conv1 and the same pooling1->relu. for example. now convolution should be returned to fp32
// after greedy mode, it should be fp32.
for (auto inputTo : tensor->getInputTo()) {
for (auto inputTo : getInputTo(tensor)) {
for (size_t o = 0; o < inputTo.second->outData.size(); o++) {
if (inputTo.second->outData[o]->getTensorDesc().getPrecision() == Precision::BF16) {
bool marked = tryToMarkFP32(inputTo.second->outData[o], immutable);
@ -207,13 +207,13 @@ bool BF16Transformer::tryToMarkFP32(InferenceEngine::DataPtr data, const std::se
// if there is one consumer, we can mark its input as float if it does not belong to the list of initial layers
// in other cases we need to mark a tensor which is passed to several layers as FP32 only if there is at least one consumer
// producing data in FP32, i.e. there should be a way of getting FP32 from output data to this point
if (data->getInputTo().size() == 1) {
if (_initbf16.find(data->getInputTo().begin()->second->type) == _initbf16.end()) {
if (getInputTo(data).size() == 1) {
if (_initbf16.find(getInputTo(data).begin()->second->type) == _initbf16.end()) {
marked = true;
}
} else {
// get all consumers
for (auto o : data->getInputTo()) {
for (auto o : getInputTo(data)) {
// if tensor goes to several layers, we will mark it by FP32 only if one of the layer is unknown
if (_initbf16.find(o.second->type) == _initbf16.end() &&
_complementbf16.find(o.second->type) == _complementbf16.end() &&

View File

@ -255,7 +255,7 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::ICNNNetwork &n
if (inputs.empty())
return false;
auto & secondLayers = inputs.begin()->second->getInputData()->getInputTo();
auto & secondLayers = getInputTo(inputs.begin()->second->getInputData());
if (secondLayers.empty())
return false;

View File

@ -111,7 +111,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
std::unordered_set<DataPtr> unused_data;  // nodes which have no consumers (output or just unused)
auto _parent_port = [] (const DataPtr &data) -> int {
auto parent = data->getCreatorLayer().lock();
auto parent = getCreatorLayer(data).lock();
for (int i = 0; i < parent->outData.size(); i++)
if (data == parent->outData[i])
return i;
@ -136,7 +136,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
for (int port = 0; port < layer->insData.size(); port++) {
auto data = layer->insData[port].lock();
auto parent_layer = data->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(data).lock();
if (!parent_layer) continue; // no parent means that it is input data node (or memory/const layer)
auto parent_node = layer2node[parent_layer];
@ -146,14 +146,14 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
graphEdges.push_back(edge);
}
for (auto &out_data : layer->outData) {
if (out_data->getInputTo().empty()) {
if (getInputTo(out_data).empty()) {
unused_data.insert(out_data);
}
}
}
for (const auto &output : subgraph.outputs) {
auto parent_layer = output->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(output).lock();
auto parent_node = layer2node[parent_layer];
CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()}));
@ -173,7 +173,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
// Add stub output node for unused data
for (auto to_stub_data : unused_data) {
auto parent_layer = to_stub_data->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(to_stub_data).lock();
auto parent_node = layer2node[parent_layer];
CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
@ -196,7 +196,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
for (auto p : input->getInputTo()) {
for (auto p : getInputTo(input)) {
auto consumer = p.second;
MKLDNNEdgePtr edge(new MKLDNNEdge(node, layer2node[consumer], 0, _child_port(input, consumer)));
node->addEdge(edge);
@ -220,7 +220,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
// The input layer precision has to be equal to the InputData precision
std::map<std::string, Precision> changedPrecision;
for (const auto& input : inputs) {
auto inputLayer = input.second->getInputData()->getCreatorLayer().lock();
auto inputLayer = getCreatorLayer(input.second->getInputData()).lock();
if (inputLayer) {
inputLayer->precision = inputLayer->outData[0]->getTensorDesc().getPrecision();
}
@ -230,7 +230,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
std::unordered_set<DataPtr> unused_data;  // nodes which have no consumers (output or just unused)
auto _parent_port = [] (const DataPtr &data) -> int {
auto parent = data->getCreatorLayer().lock();
auto parent = getCreatorLayer(data).lock();
for (int i = 0; i < parent->outData.size(); i++)
if (data == parent->outData[i])
return i;
@ -258,7 +258,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
for (int port = 0; port < layer->insData.size(); port++) {
auto data = layer->insData[port].lock();
auto parent_layer = data->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(data).lock();
if (!parent_layer) continue; // no parent means that it is input data node (or memory/const layer)
auto parent_node = layer2node[parent_layer];
@ -268,7 +268,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
graphEdges.push_back(edge);
}
for (auto &out_data : layer->outData) {
if (out_data->getInputTo().empty()) {
if (getInputTo(out_data).empty()) {
unused_data.insert(out_data);
}
}
@ -280,7 +280,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
for (const auto &output : outputs) {
const auto data = output.second;
auto parent_layer = data->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(data).lock();
auto parent_node = layer2node[parent_layer];
CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()}));
@ -300,7 +300,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
// Add stub output node for unused data
for (auto to_stub_data : unused_data) {
auto parent_layer = to_stub_data->getCreatorLayer().lock();
auto parent_layer = getCreatorLayer(to_stub_data).lock();
auto parent_node = layer2node[parent_layer];
CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
@ -316,7 +316,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
// Replicate input nodes
for (const auto& input : inputs) {
auto inputLayer = input.second->getInputData()->getCreatorLayer().lock();
auto inputLayer = getCreatorLayer(input.second->getInputData()).lock();
inputNodes[input.first] = layer2node[inputLayer];
// Loading mean images

View File

@ -155,12 +155,12 @@ std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
std::string data_name = node->getName() + "_out" + std::to_string(i);
pr->outData[i] = std::make_shared<Data>(data_name, edge->getDesc());
data = pr->outData[i];
data->getCreatorLayer() = pr;
getCreatorLayer(data) = pr;
} else {
data = pr->outData[0];
}
data->getInputTo()[ch->name] = ch;
getInputTo(data)[ch->name] = ch;
ch->insData[in_port] = data;
}
}
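
The graph-dumping hunk above shows the write side of the migration, which the MKLDNN and test code below repeats many times: a new Data object is attached to its producer through getCreatorLayer and to each consumer through getInputTo. A hedged sketch of that wiring (the function and variable names are illustrative; the child's insData vector is assumed to be pre-sized by the caller):

#include <ie_layers.h>
#include <memory>

using namespace InferenceEngine;

void connectThroughData(const CNNLayerPtr& parent, const CNNLayerPtr& child,
                        const TensorDesc& desc, size_t childInPort) {
    auto data = std::make_shared<Data>(parent->name + "_out", desc);

    // before: data->getCreatorLayer() = parent;
    getCreatorLayer(data) = parent;
    parent->outData.push_back(data);

    // before: data->getInputTo()[child->name] = child;
    getInputTo(data)[child->name] = child;
    child->insData[childInPort] = data;  // assumes insData was resized beforehand
}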

View File

@ -343,9 +343,9 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndZeroPoints(MKLDNNGraph &graph) {
if (convNode->inputZeroPoints.empty())
return;
auto weightsLayer = convLayer->insData[1].lock()->getCreatorLayer().lock();
auto weightsLayer = getCreatorLayer(convLayer->insData[1].lock()).lock();
if (weightsLayer->type != "Const") {
weightsLayer = weightsLayer->insData[0].lock()->getCreatorLayer().lock();
weightsLayer = getCreatorLayer(weightsLayer->insData[0].lock()).lock();
}
auto weightsBlob = dynamic_cast<TBlob<int8_t>*>(weightsLayer->blobs["custom"].get());

View File

@ -93,8 +93,8 @@ bool MKLDNNConvolutionNode::canBeExecutedInInt8() {
InferenceEngine::Precision MKLDNNConvolutionNode::fusedEltwisePrecision(MKLDNNEltwiseNode *eltwiseNode, int findex) {
InferenceEngine::Precision eltwisePrecision;
auto parent0 = eltwiseNode->getCnnLayer()->insData[0].lock()->getCreatorLayer().lock();
auto parent1 = eltwiseNode->getCnnLayer()->insData[1].lock()->getCreatorLayer().lock();
auto parent0 = getCreatorLayer(eltwiseNode->getCnnLayer()->insData[0].lock()).lock();
auto parent1 = getCreatorLayer(eltwiseNode->getCnnLayer()->insData[1].lock()).lock();
auto fusedParent = findex != 0 ? fusedWith[findex - 1].get()->getCnnLayer() : this->getCnnLayer();
eltwisePrecision = fusedParent == parent0 ? eltwiseNode->getCnnLayer()->insData[1].lock()->getPrecision() :

View File

@ -9,7 +9,7 @@
#pragma once
#include <ie_layers.h>
#include <ie_iextension.h>
#include <ie_input_info.hpp>
#include <ie_icnn_network.hpp>
@ -68,7 +68,7 @@ static void copyInputOutputInfo(const InputsDataMap & networkInputs, const Outpu
newPtr.reset(new InputInfo());
copyPreProcess(it.second->getPreProcess(), newPtr->getPreProcess());
DataPtr newData(new Data(*it.second->getInputData()));
newData->getInputTo().clear();
getInputTo(newData).clear();
newPtr->setInputData(newData);
}
_networkInputs[it.first] = newPtr;
@ -77,7 +77,7 @@ static void copyInputOutputInfo(const InputsDataMap & networkInputs, const Outpu
DataPtr newData;
if (it.second) {
newData.reset(new Data(*it.second));
newData->getInputTo().clear();
getInputTo(newData).clear();
}
_networkOutputs[it.first] = newData;
}

View File

@ -136,7 +136,7 @@ void FormatParser::SetLayerInput(CNNNetworkImpl& network, const std::string& dat
THROW_IE_EXCEPTION << "in Layer " << targetLayer->name
<< ": trying to connect an edge to non existing output port: " << dataId;
dataPtr->getInputTo()[targetLayer->name] = targetLayer;
getInputTo(dataPtr)[targetLayer->name] = targetLayer;
const LayerParseParameters& parseInfo = layersParseInfo[targetLayer->name];
if (targetLayer->insData.empty()) {
targetLayer->insData.resize(parseInfo.inputPorts.size());
@ -313,11 +313,11 @@ CNNNetworkImplPtr FormatParser::Parse(pugi::xml_node& root) {
}
_portsToData[outId] = ptr;
if (ptr->getCreatorLayer().lock())
if (getCreatorLayer(ptr).lock())
THROW_IE_EXCEPTION << "two layers set to the same output [" << outName << "], conflict at offset "
<< node.offset_debug();
ptr->getCreatorLayer() = layer;
getCreatorLayer(ptr) = layer;
layer->outData.push_back(ptr);
}
nodeCnt++;
@ -379,7 +379,7 @@ CNNNetworkImplPtr FormatParser::Parse(pugi::xml_node& root) {
TensorDesc::getLayoutByDims(pars_info.inputPorts[i].dims)}));
layer->insData[i] = data;
data->getInputTo()[layer->name] = layer;
getInputTo(data)[layer->name] = layer;
const auto insId = gen_id(pars_info.layerId, pars_info.inputPorts[i].portId);
_portsToData[insId] = data;

View File

@ -123,7 +123,7 @@ public:
// Mark data as network output. Just for check
for (const auto& kvp : outputs) {
auto& data = kvp.second;
auto layer = data->getCreatorLayer().lock();
auto layer = getCreatorLayer(data).lock();
auto& outs = layer->outData;
auto o_idx = std::find(outs.begin(), outs.end(), data) - outs.begin();
auto sts = net->addOutput(layer->name, o_idx, nullptr);
@ -144,7 +144,7 @@ public:
continue;
if (!holder)
holder = std::make_shared<Data>("const_holder", Precision::UNSPECIFIED);
holder->getInputTo()[it.first] = layer;
getInputTo(holder)[it.first] = layer;
}
}

View File

@ -119,7 +119,7 @@ void FrontEnd::detectNetworkBatch(
}
// 1. Don't support if DetectionOutput is not the last layer in network
if (!layer->outData.front()->getInputTo().empty()) {
if (!getInputTo(layer->outData.front()).empty()) {
VPU_THROW_FORMAT("Unsupported layer %s configuration : it is not a network output", layer->name);
}

View File

@ -512,7 +512,7 @@ void FrontEnd::getInputAndOutputData(
// Skip adding data if it is not utilized
const bool isNetworkOutput = _ieParsedNetwork.networkOutputs.count(layerOutput->getName()) > 0;
const auto isLeaf = layerOutput->getInputTo().empty();
const auto isLeaf = getInputTo(layerOutput).empty();
if (!isNetworkOutput && isLeaf) {
outputs[i] = nullptr;
continue;

View File

@ -63,7 +63,7 @@ bool isConst(const ie::CNNLayerPtr& layer) {
}
bool isConst(const ie::DataPtr& data) {
const auto creator = data->getCreatorLayer().lock();
const auto creator = getCreatorLayer(data).lock();
return creator != nullptr && isConst(creator);
}
@ -96,7 +96,7 @@ void FrontEnd::parseTensorIterator(const Model& model, const ie::CNNLayerPtr& la
auto createConstData = [&](const ie::DataPtr& original) {
VPU_THROW_UNLESS(isConst(original), "VPU const data object can be created only from const IE data object");
const auto& creator = original->getCreatorLayer().lock();
const auto& creator = getCreatorLayer(original).lock();
const auto& descriptor = createDescriptor(original->getTensorDesc());
const auto& blob = ieBlobContent(creator->blobs.begin()->second, descriptor.type());

View File

@ -112,11 +112,11 @@ namespace vpu {
auto parent = stageMetaIndexToLayer[dataMetaData.parentIndex];
data = std::make_shared<::InferenceEngine::Data>(dataMetaData.name, dataMetaData.desc);
parent->outData.push_back(data);
data->getCreatorLayer() = parent;
getCreatorLayer(data) = parent;
for (auto &childMetaIndex : dataMetaData.childrenIndices) {
auto child = stageMetaIndexToLayer[childMetaIndex];
data->getInputTo()[child->name] = child;
getInputTo(data)[child->name] = child;
child->insData.push_back(data);
}
}

View File

@ -24,10 +24,15 @@ addIeTargetTest(
DEPENDENCIES
extension_tests
mock_engine
inference_engine_ir_reader
LABELS
IE
)
if(TARGET inference_engine_onnx_reader)
add_dependencies(${TARGET_NAME} inference_engine_onnx_reader)
endif()
include(CMakeParseArguments)
#

View File

@ -129,7 +129,7 @@ TEST(CNNNGraphImplTests, TestGetOutputAfterConvertNetwork) {
InferenceEngine::CNNNetwork cnnNet(ngraph);
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
cnnNet.addOutput(testLayerName);
InferenceEngine::OutputsDataMap outs = cnnNet.getOutputsInfo();
@ -270,7 +270,7 @@ TEST(CNNNGraphImplTests, TestAddOutputFromConvertedNetwork) {
ASSERT_NE(nullptr, cnnNet.getFunction());
ASSERT_EQ(5, cnnNet.layerCount());
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
auto outputs = cnnNet.getOutputsInfo();
ASSERT_EQ(2, outputs.size());
ASSERT_TRUE(outputs.find("relu2") != outputs.end());
@ -296,7 +296,7 @@ TEST(CNNNGraphImplTests, ConstantAsInternalAndExternalLayer) {
InferenceEngine::CNNNetwork cnnNet(ngraph);
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
ASSERT_EQ(4, cnnNet.layerCount());
}
@ -419,7 +419,7 @@ TEST(CNNNGraphImplTests, SavePrimitivesPriority) {
auto network = ie.ReadNetwork(model, weights);
auto inputInfo = network.getInputsInfo();
auto cnnLayer = inputInfo.begin()->second->getInputData()->getCreatorLayer().lock();
auto cnnLayer = getCreatorLayer(inputInfo.begin()->second->getInputData()).lock();
ASSERT_TRUE(cnnLayer);
ASSERT_NE(cnnLayer->params.find("PrimitivesPriority"), cnnLayer->params.end());
ASSERT_EQ("cpu:avx2", cnnLayer->params["PrimitivesPriority"]);
@ -515,7 +515,7 @@ TEST(CNNNGraphImplTests, CanChangeInputPrecision) {
SCOPED_TRACE("Convert to old format");
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
}
{
SCOPED_TRACE("After conversion");
@ -562,7 +562,7 @@ TEST(CNNNGraphImplTests, CanChangeInputLayout) {
SCOPED_TRACE("Convert to old format");
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
}
{
SCOPED_TRACE("After conversion");
@ -609,7 +609,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {
SCOPED_TRACE("Convert to old format");
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
}
{
SCOPED_TRACE("After conversion");
@ -656,7 +656,7 @@ TEST(CNNNGraphImplTests, CanChangeOutputLayout) {
SCOPED_TRACE("Convert to old format");
// convert to old representation
cnnNet.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
}
{
SCOPED_TRACE("After conversion");

View File

@ -42,7 +42,7 @@ TEST_P(CNNNetworkSerializerTest, Serialize) {
{
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
originalNetwork.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(originalNetwork.getInputsInfo().begin()->second->getInputData());
IE_SUPPRESS_DEPRECATED_END
}
originalNetwork.getInputsInfo().begin()->second->setPrecision(_netPrc);

View File

@ -258,6 +258,6 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
cnn.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
IE_SUPPRESS_DEPRECATED_END
}

View File

@ -223,6 +223,6 @@ TEST_F(NGraphReaderTests, ReadReLUScalarNetwork) {
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
cnn.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
IE_SUPPRESS_DEPRECATED_END
}

View File

@ -44,7 +44,7 @@ protected:
private:
void triggerConversionToCNNNetwork() {
// convert to old representation
cnnNetwork.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
getCreatorLayer(cnnNetwork.getInputsInfo().begin()->second->getInputData());
}
static const char s_FriendlyName[];
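
The nGraph reader and CNNNetwork tests above all trigger the conversion to the legacy CNNLayer representation the same way: by touching the creator layer of an input Data object through the free function. A small sketch of that trigger, assuming (as the tests above do) that the network has at least one input and that the access forces the conversion:

#include <ie_layers.h>
#include <cpp/ie_cnn_network.h>

// Force conversion of an nGraph-based CNNNetwork to the old CNNLayer representation.
void triggerLegacyConversion(InferenceEngine::CNNNetwork& network) {
    // before: network.getInputsInfo().begin()->second->getInputData()->getCreatorLayer();
    getCreatorLayer(network.getInputsInfo().begin()->second->getInputData());
}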

View File

@ -110,9 +110,7 @@ public:
for (auto input : networkInputs) {
InputInfo::Ptr q = input.second;
DataPtr p = q->getInputData();
IE_SUPPRESS_DEPRECATED_START
layer = p->getInputTo().begin()->second;
IE_SUPPRESS_DEPRECATED_END
layer = getInputTo(p).begin()->second;
}
std::map<std::string, std::string> deviceMapping = {

View File

@ -119,7 +119,7 @@ void ConcatNeighboringGraphTransformation::validate() {
EXPECT_EQ(2, outputs.size());
for (const auto it : outputs) {
const InferenceEngine::CNNLayerPtr outputLayer = it.second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it.second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -106,7 +106,7 @@ void ConcatTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -96,7 +96,7 @@ void ConvolutionTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ(fqOnActivations & fqOnWeights ? "ScaleShift" : "Convolution", outputLayer->type);

View File

@ -93,14 +93,14 @@ void DepthToSpaceTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);
EXPECT_EQ(1ul, outputLayer->insData.size());
const InferenceEngine::DataPtr insData = outputLayer->insData[0].lock();
EXPECT_TRUE(insData != nullptr);
const InferenceEngine::CNNLayerPtr depthToSpace = insData->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr depthToSpace = getCreatorLayer(insData).lock();
EXPECT_TRUE(depthToSpace != nullptr);
EXPECT_EQ("DepthToSpace", depthToSpace->type);

View File

@ -88,7 +88,7 @@ void FullyConnectedTransformation::validate() {
EXPECT_EQ(1, outputs.size());
for (const auto it : outputs) {
const InferenceEngine::CNNLayerPtr outputLayer = it.second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it.second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -82,7 +82,7 @@ void GemmTransformation::validate() {
EXPECT_EQ(1, outputs.size());
for (const auto it : outputs) {
const InferenceEngine::CNNLayerPtr outputLayer = it.second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it.second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -95,7 +95,7 @@ void NormalizeTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ(shift ? "Normalize" : "ScaleShift", outputLayer->type);

View File

@ -116,11 +116,11 @@ void OutputLayersHandlingInTransformations::validate() {
params.precisionsOnActivations.begin(),
params.precisionsOnActivations.end(),
[](const float value) { return value == InferenceEngine::Precision::U8; })) {
EXPECT_EQ("ScaleShift", fakeQuantizeOnActivationsIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", convolutionIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(fakeQuantizeOnActivationsIt->second).lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(convolutionIt->second).lock()->type);
} else {
EXPECT_EQ("FakeQuantize", fakeQuantizeOnActivationsIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("Convolution", convolutionIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("FakeQuantize", getCreatorLayer(fakeQuantizeOnActivationsIt->second).lock()->type);
EXPECT_EQ("Convolution", getCreatorLayer(convolutionIt->second).lock()->type);
}
IE_SUPPRESS_DEPRECATED_END

View File

@ -153,13 +153,13 @@ void OutputLayersHandlingInTransformationsForConcat::validate() {
params.precisionsOnActivations.begin(),
params.precisionsOnActivations.end(),
[](const float value) { return value == InferenceEngine::Precision::U8; })) {
EXPECT_EQ("ScaleShift", concatIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", fakeQuantize2It->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", convolutionIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(concatIt->second).lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(fakeQuantize2It->second).lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(convolutionIt->second).lock()->type);
} else {
EXPECT_EQ("Concat", concatIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("FakeQuantize", fakeQuantize2It->second->getCreatorLayer().lock()->type);
EXPECT_EQ("Convolution", convolutionIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("Concat", getCreatorLayer(concatIt->second).lock()->type);
EXPECT_EQ("FakeQuantize", getCreatorLayer(fakeQuantize2It->second).lock()->type);
EXPECT_EQ("Convolution", getCreatorLayer(convolutionIt->second).lock()->type);
}
IE_SUPPRESS_DEPRECATED_END

View File

@ -134,15 +134,15 @@ void OutputLayersHandlingInTransformationsForConcatMultiChannel::validate() {
const auto concatIt = outputs.find("concat");
EXPECT_TRUE(concatIt != outputs.end());
EXPECT_EQ("ScaleShift", concatIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(concatIt->second).lock()->type);
const auto fakeQuantize2It = outputs.find("fakeQuantize2");
EXPECT_TRUE(fakeQuantize2It != outputs.end());
EXPECT_EQ("ScaleShift", fakeQuantize2It->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(fakeQuantize2It->second).lock()->type);
const auto convolutionIt = outputs.find("convolution");
EXPECT_TRUE(convolutionIt != outputs.end());
EXPECT_EQ("ScaleShift", convolutionIt->second->getCreatorLayer().lock()->type);
EXPECT_EQ("ScaleShift", getCreatorLayer(convolutionIt->second).lock()->type);
IE_SUPPRESS_DEPRECATED_END
}

View File

@ -86,7 +86,7 @@ void PermuteTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -80,7 +80,7 @@ void ReshapeTransformation::validate() {
EXPECT_EQ(1, outputs.size());
std::map<std::string, InferenceEngine::DataPtr>::iterator it = outputs.begin();
const InferenceEngine::CNNLayerPtr outputLayer = it->second->getCreatorLayer().lock();
const InferenceEngine::CNNLayerPtr outputLayer = getCreatorLayer(it->second).lock();
EXPECT_TRUE(outputLayer != nullptr);
EXPECT_EQ("ScaleShift", outputLayer->type);

View File

@ -49,7 +49,7 @@ void generateTestModel(const std::string &modelPath,
auto inputOutData = std::make_shared<InferenceEngine::Data>("InputOutData",
InferenceEngine::TensorDesc{netPrc, inputDims,
InferenceEngine::Layout::NCHW});
inputOutData->getCreatorLayer() = inputLayerPtr;
getCreatorLayer(inputOutData) = inputLayerPtr;
inputLayerPtr->outData[0] = inputOutData;
if (refLayersVec) refLayersVec->emplace_back(inputLayerPtr);
@ -88,12 +88,12 @@ void generateTestModel(const std::string &modelPath,
conv1LayerPtr->insData.resize(1);
conv1LayerPtr->outData.resize(1);
inputOutData->getInputTo()[conv1LayerPtr->name] = conv1LayerPtr;
getInputTo(inputOutData)[conv1LayerPtr->name] = conv1LayerPtr;
conv1LayerPtr->insData[0] = inputOutData;
auto conv1OutData = std::make_shared<InferenceEngine::Data>("Conv1OutData",
InferenceEngine::TensorDesc{netPrc, conv1OutShape,
InferenceEngine::Layout::NCHW});
conv1OutData->getCreatorLayer() = conv1LayerPtr;
getCreatorLayer(conv1OutData) = conv1LayerPtr;
conv1LayerPtr->outData[0] = conv1OutData;
auto conv1ParamConstLayerXML = ir_builder_v10
@ -128,12 +128,12 @@ void generateTestModel(const std::string &modelPath,
relu1LayerPtr->insData.resize(1);
relu1LayerPtr->outData.resize(1);
conv1OutData->getInputTo()[relu1LayerPtr->name] = relu1LayerPtr;
getInputTo(conv1OutData)[relu1LayerPtr->name] = relu1LayerPtr;
relu1LayerPtr->insData[0] = conv1OutData;
auto relu1OutData = std::make_shared<InferenceEngine::Data>("Relu1OutData",
InferenceEngine::TensorDesc{netPrc, conv1OutShape,
InferenceEngine::Layout::NCHW});
relu1OutData->getCreatorLayer() = relu1LayerPtr;
getCreatorLayer(relu1OutData) = relu1LayerPtr;
relu1LayerPtr->outData[0] = relu1OutData;
auto relu1LayerXML = ir_builder_v10
@ -159,12 +159,12 @@ void generateTestModel(const std::string &modelPath,
lrn1LayerPtr->insData.resize(1);
lrn1LayerPtr->outData.resize(1);
relu1OutData->getInputTo()[lrn1LayerPtr->name] = lrn1LayerPtr;
getInputTo(relu1OutData)[lrn1LayerPtr->name] = lrn1LayerPtr;
lrn1LayerPtr->insData[0] = relu1OutData;
auto lrn1OutData = std::make_shared<InferenceEngine::Data>("Lrn1OutData",
InferenceEngine::TensorDesc{netPrc, conv1OutShape,
InferenceEngine::Layout::NCHW});
lrn1OutData->getCreatorLayer() = lrn1LayerPtr;
getCreatorLayer(lrn1OutData) = lrn1LayerPtr;
lrn1LayerPtr->outData[0] = lrn1OutData;
auto lrn1ParamConstLayerXML = ir_builder_v10
@ -208,12 +208,12 @@ void generateTestModel(const std::string &modelPath,
pool1LayerPtr->insData.resize(1);
pool1LayerPtr->outData.resize(1);
lrn1OutData->getInputTo()[pool1LayerPtr->name] = pool1LayerPtr;
getInputTo(lrn1OutData)[pool1LayerPtr->name] = pool1LayerPtr;
pool1LayerPtr->insData[0] = lrn1OutData;
auto pool1OutData = std::make_shared<InferenceEngine::Data>("Pool1OutData",
InferenceEngine::TensorDesc{netPrc, pool1OutShape,
InferenceEngine::Layout::NCHW});
pool1OutData->getCreatorLayer() = pool1LayerPtr;
getCreatorLayer(pool1OutData) = pool1LayerPtr;
pool1LayerPtr->outData[0] = pool1OutData;
auto pool1LayerXML = ir_builder_v10
@ -251,7 +251,7 @@ void generateTestModel(const std::string &modelPath,
split1LayerPtr->insData.resize(1);
split1LayerPtr->outData.resize(2);
pool1OutData->getInputTo()[split1LayerPtr->name] = split1LayerPtr;
getInputTo(pool1OutData)[split1LayerPtr->name] = split1LayerPtr;
split1LayerPtr->insData[0] = pool1OutData;
auto split1OutData0 = std::make_shared<InferenceEngine::Data>("Split1OutData0",
InferenceEngine::TensorDesc{netPrc,
@ -261,8 +261,8 @@ void generateTestModel(const std::string &modelPath,
InferenceEngine::TensorDesc{netPrc,
split1OutShape,
InferenceEngine::Layout::NCHW});
split1OutData0->getCreatorLayer() = split1LayerPtr;
split1OutData1->getCreatorLayer() = split1LayerPtr;
getCreatorLayer(split1OutData0) = split1LayerPtr;
getCreatorLayer(split1OutData1) = split1LayerPtr;
split1LayerPtr->outData[0] = split1OutData0;
split1LayerPtr->outData[1] = split1OutData1;
@ -307,12 +307,12 @@ void generateTestModel(const std::string &modelPath,
conv2LayerPtr->insData.resize(1);
conv2LayerPtr->outData.resize(1);
split1OutData0->getInputTo()[conv2LayerPtr->name] = conv2LayerPtr;
getInputTo(split1OutData0)[conv2LayerPtr->name] = conv2LayerPtr;
conv2LayerPtr->insData[0] = split1OutData0;
auto conv2OutData = std::make_shared<InferenceEngine::Data>("Conv2OutData",
InferenceEngine::TensorDesc{netPrc, conv2OutShape,
InferenceEngine::Layout::NCHW});
conv2OutData->getCreatorLayer() = conv2LayerPtr;
getCreatorLayer(conv2OutData) = conv2LayerPtr;
conv2LayerPtr->outData[0] = conv2OutData;
auto conv2ParamConstLayerXML = ir_builder_v10
@ -364,12 +364,12 @@ void generateTestModel(const std::string &modelPath,
conv3LayerPtr->insData.resize(1);
conv3LayerPtr->outData.resize(1);
split1OutData1->getInputTo()[conv3LayerPtr->name] = conv3LayerPtr;
getInputTo(split1OutData1)[conv3LayerPtr->name] = conv3LayerPtr;
conv3LayerPtr->insData[0] = split1OutData1;
auto conv3OutData = std::make_shared<InferenceEngine::Data>("Conv3OutData",
InferenceEngine::TensorDesc{netPrc, conv3OutShape,
InferenceEngine::Layout::NCHW});
conv3OutData->getCreatorLayer() = conv3LayerPtr;
getCreatorLayer(conv3OutData) = conv3LayerPtr;
conv3LayerPtr->outData[0] = conv3OutData;
auto conv3ParamConstLayerXML = ir_builder_v10
@ -414,15 +414,15 @@ void generateTestModel(const std::string &modelPath,
concat1LayerPtr->insData.resize(2);
concat1LayerPtr->outData.resize(1);
conv2OutData->getInputTo()[concat1LayerPtr->name] = concat1LayerPtr;
conv3OutData->getInputTo()[concat1LayerPtr->name] = concat1LayerPtr;
getInputTo(conv2OutData)[concat1LayerPtr->name] = concat1LayerPtr;
getInputTo(conv3OutData)[concat1LayerPtr->name] = concat1LayerPtr;
concat1LayerPtr->insData[0] = conv2OutData;
concat1LayerPtr->insData[1] = conv3OutData;
auto concat1OutData = std::make_shared<InferenceEngine::Data>("Concat1OutData",
InferenceEngine::TensorDesc{netPrc,
concat1OutShape,
InferenceEngine::Layout::NCHW});
concat1OutData->getCreatorLayer() = concat1LayerPtr;
getCreatorLayer(concat1OutData) = concat1LayerPtr;
concat1LayerPtr->outData[0] = concat1OutData;
auto concat1LayerXML = ir_builder_v10

View File

@ -7,6 +7,7 @@
#include <string>
#include <vector>
#include "ie_layers.h"
#include "inference_engine.hpp"
namespace FuncTestUtils {

View File

@ -10,6 +10,7 @@
#include <memory>
#include <vector>
#include "ie_layers.h"
#include "ie_icnn_network.hpp"
namespace InferenceEngine {
@ -28,7 +29,7 @@ public:
}
void getOutputsInfo(OutputsDataMap& out) const noexcept override {
auto data = std::make_shared<Data>(MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME, Precision::UNSPECIFIED);
data->getInputTo()[""] = std::make_shared<CNNLayer>(LayerParams{
getInputTo(data)[""] = std::make_shared<CNNLayer>(LayerParams{
MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME,
"FullyConnected",
Precision::FP32 });
@ -42,7 +43,7 @@ public:
MockNotEmptyICNNNetwork::INPUT_BLOB_NAME,
"Input",
Precision::FP32 });
inData->getInputTo()[MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME] = inputLayer;
getInputTo(inData)[MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME] = inputLayer;
inData->setDims(MockNotEmptyICNNNetwork::INPUT_DIMENTIONS);
inData->setLayout(Layout::NCHW);
inputInfo->setInputData(inData);
@ -50,7 +51,7 @@ public:
auto outData = std::make_shared<Data>(MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME, Precision::UNSPECIFIED);
outData->setDims(MockNotEmptyICNNNetwork::OUTPUT_DIMENTIONS);
outData->setLayout(Layout::NCHW);
outData->getInputTo()[""] = std::make_shared<CNNLayer>(LayerParams{
getInputTo(outData)[""] = std::make_shared<CNNLayer>(LayerParams{
MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME,
"FullyConnected",
Precision::FP32 });

View File

@ -50,7 +50,7 @@ void RawMatcher::match() {
if (config._inputPrecision) q->setPrecision(config._inputPrecision);
DataPtr p = q->getInputData();
IE_SUPPRESS_DEPRECATED_START
layer = p->getInputTo().begin()->second;
layer = getInputTo(p).begin()->second;
IE_SUPPRESS_DEPRECATED_END
}

View File

@ -3,6 +3,7 @@
//
#include <gtest/gtest.h>
#include <ie_layers.h>
#include <details/ie_cnn_network_tools.h>
#include <cpp/ie_cnn_network.h>
#include <memory>
@ -20,7 +21,7 @@ public:
for (int i = 0; i < sorted.size(); i++) {
// check that all inputs have already been visited:
for (auto &inputs : sorted[i]->insData) {
auto inputName = inputs.lock()->getCreatorLayer().lock()->name;
auto inputName = getCreatorLayer(inputs.lock()).lock()->name;
bool bFound = false;
for (int j = 0; j < i; j++) {

View File

@ -463,13 +463,13 @@ private:
return (children.size() == 1) &&
(children[0]->type == "Convolution") &&
(children[0]->insData.size() >= 2) &&
(children[0]->insData[1].lock()->getCreatorLayer().lock()->name == layer.name);
(getCreatorLayer(children[0]->insData[1].lock()).lock()->name == layer.name);
}
static std::vector<CNNLayerPtr> getChildren(const CNNLayer& layer, const std::string& exceptionLayerName = "") {
std::vector<CNNLayerPtr> children;
for (const DataPtr outData : layer.outData) {
const std::map<std::string, CNNLayerPtr>& inputTo = outData->getInputTo();
const std::map<std::string, CNNLayerPtr>& inputTo = getInputTo(outData);
for (auto it = inputTo.begin(); it != inputTo.end(); ++it) {
CNNLayerPtr child = it->second;
if (exceptionLayerName.empty() || child->name != exceptionLayerName) {

View File

@ -121,9 +121,9 @@ void LowPrecisionTransformationValidation::validateWeightsToConst(
const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
for (const CNNLayerPtr layer : layers) {
if ((layer->type == "FakeQuantize") && CNNNetworkHelper::onWeights(*layer) && (layer->outData.size() == 1) &&
(layer->outData[0]->getInputTo().begin()->second->type == "Convolution")) {
(getInputTo(layer->outData[0]).begin()->second->type == "Convolution")) {
CNNLayerPtr childLayer = CNNNetworkHelper::getChildren(*layer)[0];
if (params.quantizeOutputs || (childLayer->outData[0]->getInputTo().size() != 0)) {
if (params.quantizeOutputs || (getInputTo(childLayer->outData[0]).size() != 0)) {
ASSERT_TRUE(notTransformedLayers.find(childLayer->name) != notTransformedLayers.end()) <<
"FakeQuantize on weights was found: " << layer->name <<
" for layer " << childLayer->name;
@ -142,7 +142,7 @@ Precision getInputPrecision(const CNNLayer& layer) {
THROW_IE_EXCEPTION << "input data is nullable";
}
CNNLayerPtr layerParent = layerParentData->getCreatorLayer().lock();
CNNLayerPtr layerParent = getCreatorLayer(layerParentData).lock();
if (layerParent == nullptr) {
THROW_IE_EXCEPTION << "parent is nullable";
}
@ -155,7 +155,7 @@ Precision getInputPrecision(const CNNLayer& layer) {
// TODO: workaround for the first Convolution:
// Issue-26622: [IE COMMON][LPT] Check if ScaleShift is dequantization ScaleShift(dequantizationLayersNames) before to apply transformation
CNNLayerPtr eltwiseParent = eltwiseParentData->getCreatorLayer().lock();
CNNLayerPtr eltwiseParent = getCreatorLayer(eltwiseParentData).lock();
if (eltwiseParent->type == "Input") {
return Precision::U8;
}
@ -185,7 +185,7 @@ void LowPrecisionTransformationValidation::validatePrecision(
continue;
}
if ((!params.quantizeOutputs) && (layer->outData[0]->getInputTo().size() == 0ul)) {
if ((!params.quantizeOutputs) && (getInputTo(layer->outData[0]).size() == 0ul)) {
continue;
}
@ -199,7 +199,7 @@ void LowPrecisionTransformationValidation::validatePrecision(
if (!params.quantizeOutputs) {
const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildrenRecursivelyExceptTypes(*layer, { "ScaleShift" });
if ((children.size() == 0ul) || (children[0]->outData.size() == 0ul) || (children[0]->outData[0]->getInputTo().size() == 0ul)) {
if ((children.size() == 0ul) || (children[0]->outData.size() == 0ul) || (getInputTo(children[0]->outData[0]).size() == 0ul)) {
continue;
}
}
@ -237,7 +237,7 @@ void LowPrecisionTransformationValidation::validatePrecision(
ASSERT_TRUE((children[0]->type == "Convolution") || (children[0]->type == "FullyConnected") || (children[0]->type == "GEMM")) <<
"unexpected child type " << children[0]->type << " '" << children[0]->name << "' for layer " << layer->type << " '" << layer->name << "' on weights";
if (children[0]->outData[0]->getInputTo().size() == 0) {
if (getInputTo(children[0]->outData[0]).size() == 0) {
// output data precision depends on device
continue;
}
@ -415,7 +415,7 @@ void LowPrecisionTransformationValidation::validateWithReference(
}
// TODO: last layer is ignored
if ((it->second->outData.size() != 0) && (it->second->outData[0]->getInputTo().size() == 0)) {
if ((it->second->outData.size() != 0) && (getInputTo(it->second->outData[0]).size() == 0)) {
continue;
}
@ -482,7 +482,7 @@ void LowPrecisionTransformationValidation::validateFakeQuantize(
const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*layer);
for (const CNNLayerPtr& child : children) {
for (const DataPtr data : child->outData) {
if (data->getInputTo().size() == 0ul) {
if (getInputTo(data).size() == 0ul) {
return;
}
}
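
A check that recurs throughout the validation above (and in the VPU front end earlier) is whether a layer's outputs still feed anything, i.e. whether it is a graph leaf. A small sketch of that test with the free accessor (the helper name is hypothetical):

#include <ie_layers.h>

using namespace InferenceEngine;

// A layer is a leaf if none of its output Data objects has a consumer.
bool isLeafLayer(const CNNLayer& layer) {
    for (const DataPtr& outData : layer.outData) {
        // before: outData->getInputTo().empty()
        if (!getInputTo(outData).empty())
            return false;
    }
    return true;
}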

View File

@ -143,7 +143,7 @@ private:
for (size_t outDataIndex = 0; outDataIndex < layer->outData.size(); ++outDataIndex) {
DataPtr outData = layer->outData[outDataIndex];
const std::map<std::string, CNNLayerPtr> inputTo = outData->getInputTo();
const std::map<std::string, CNNLayerPtr> inputTo = getInputTo(outData);
const Precision parentOutPrecision = getDataPrecision(precisionByPort, *layer, outDataIndex);
for (auto it = inputTo.begin(); it != inputTo.end(); it++) {

View File

@ -116,7 +116,7 @@ bool ConcatTestModel::transform(CNNNetwork& network, LayerTransformation::Params
//
}
} else if (dims.size() == 2ul) {
if (concatLayer->outData[0]->getInputTo().size() != 0ul) {
if (getInputTo(concatLayer->outData[0]).size() != 0ul) {
THROW_IE_EXCEPTION << "2D is not supported";
}
}

View File

@ -42,7 +42,7 @@ bool UpdateBiasesConvolutionTestModel::transform(CNNNetwork& network, LayerTrans
}
}
//CNNLayerPtr convolution = dequantizationLayer->insData[0].lock()->getCreatorLayer().lock();
//CNNLayerPtr convolution = getCreatorLayer(dequantizationLayer->insData[0].lock()).lock();
//CNNLayerPtr convolutionBiases = CNNNetworkHelper::getParent(*convolution, 2);
//if (convolutionBiases == nullptr) {
// THROW_IE_EXCEPTION << "biases const layer was not added";

View File

@ -22,7 +22,7 @@ public:
* @param wrongID - data index which is falsely displayed among the inputs of y
*/
void CONNECT_WRONGLY(int x, int y, int wrongID) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[wrongID].front());
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
@ -33,7 +33,7 @@ public:
* @param y - input layer index
*/
void CONNECT_WITHOUT_INS_DATA(int x, int y) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
}
@ -45,7 +45,7 @@ public:
*/
void CONNECT_WITH_DATA_NAME(int x, int y, int name) {
datas[x].front()->setName(std::to_string(name));
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[x].front());
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
@ -58,7 +58,7 @@ public:
*/
void CONNECT_WITH_LAYER_NAME(int x, int y, int name) {
layers[y]->name = std::to_string(name);
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[x].front());
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
@ -69,8 +69,8 @@ public:
* @param y - input layer index
*/
void CONNECT_WITHOUT_CREATOR_LAYER_WHICH_EXIST(int x, int y) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
datas[x].front()->getCreatorLayer() = std::weak_ptr<CNNLayer>();
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
getCreatorLayer(datas[x].front()) = std::weak_ptr<CNNLayer>();
layers[y]->insData.push_back(datas[x].front());
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
@ -81,8 +81,8 @@ public:
* @param y - input layer index
*/
void CONNECT_WITHOUT_CREATOR_LAYER(int x, int y) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
datas[x].front()->getCreatorLayer() = std::weak_ptr<CNNLayer>();
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
getCreatorLayer(datas[x].front()) = std::weak_ptr<CNNLayer>();
layers[x] = nullptr;
layers[y]->insData.push_back(datas[x].front());
rhsLayers.insert(layers[y]);
@ -93,7 +93,7 @@ public:
* @param y - input layer index
*/
void CONNECT_WITH_INPUT_TYPE(int x, int y, std::string type) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[x].front());
layers[x]->type = type;
lhsLayers.insert(layers[x]);

View File

@ -74,7 +74,7 @@ TEST_F(I8QuantisationTest, inputPrecisionIs16Bits){
auto newNet = q.quantize(network, 1000);
InputsDataMap inputs;
newNet->getInputsInfo(inputs);
auto inputLayer = inputs.begin()->second->getInputData()->getInputTo().begin()->second->insData.front().lock()->getCreatorLayer().lock();
auto inputLayer = getCreatorLayer(getInputTo(inputs.begin()->second->getInputData()).begin()->second->insData.front().lock()).lock();
ASSERT_EQ(inputLayer->precision, Precision::I16);
}
@ -105,7 +105,7 @@ TEST_F(I8QuantisationTest, outputAffinePrecisionIs32Bits){
auto newNet = q.quantize(network, 1000);
InputsDataMap inputs;
newNet->getInputsInfo(inputs);
auto affineDataPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second->outData.front();
auto affineDataPtr = getInputTo(inputs.begin()->second->getInputData()).begin()->second->outData.front();
ASSERT_EQ(affineDataPtr->getTensorDesc().getPrecision(), Precision::I32);
}
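
The GNA quantization tests above navigate from the network inputs to the first real layer, which now also goes through getInputTo. A sketch of that navigation, assuming the network has at least one input with at least one consumer (the helper name is illustrative):

#include <ie_layers.h>
#include <ie_icnn_network.hpp>

using namespace InferenceEngine;

// Returns the first layer fed by the first network input.
CNNLayerPtr firstLayerAfterInput(ICNNNetwork& network) {
    InputsDataMap inputs;
    network.getInputsInfo(inputs);
    // before: inputs.begin()->second->getInputData()->getInputTo().begin()->second
    return getInputTo(inputs.begin()->second->getInputData()).begin()->second;
}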

View File

@ -95,7 +95,7 @@ TEST_F(I16QuantisationTest, outputAffinePrecisionIs32Bits){
auto newNet = q.quantize(network, 1000);
InputsDataMap inputs;
newNet->getInputsInfo(inputs);
auto affineDataPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second->outData.front();
auto affineDataPtr = getInputTo(inputs.begin()->second->getInputData()).begin()->second->outData.front();
ASSERT_EQ(affineDataPtr->getTensorDesc().getPrecision(), Precision::I32);
}
@ -126,7 +126,7 @@ TEST_F(I16QuantisationTest, DISABLED_outputScaleFactorForAffineIsCorrect){
auto newNet = q.quantize(network, 1000);
InputsDataMap inputs;
newNet->getInputsInfo(inputs);
auto affineLayerPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second;
auto affineLayerPtr = getInputTo(inputs.begin()->second->getInputData()).begin()->second;
auto quantParams = getInjectedData<QuantizedLayerParams>(affineLayerPtr);

View File

@ -47,7 +47,7 @@ protected:
new_outputs.push_back(cnnLayer->name);
for (const auto &layer : cnnLayer->outData) {
for (const auto &data : layer->getInputTo()) {
for (const auto &data : getInputTo(layer)) {
addOutputToEachNode(network, new_outputs, data.second);
}
}
@ -250,7 +250,7 @@ TEST_F(MKLDNNGraphLeaksTests, MKLDNN_not_release_outputs_fp32) {
std::vector<std::string> new_outputs;
for (auto input : network.getInputsInfo()) {
for (const auto &layer : input.second->getInputData()->getInputTo()) {
for (const auto &layer : getInputTo(input.second->getInputData())) {
addOutputToEachNode(network, new_outputs, layer.second);
}
}

View File

@ -52,9 +52,9 @@ class GraphTestsBase : public ::testing::Test {
dims.push_back(batchSize);
data->setDims(dims);
for (auto output : (*layer)->outData) {
data->getInputTo() = output->getInputTo();
getInputTo(data) = getInputTo(output);
}
data->getCreatorLayer() = (*layer);
getCreatorLayer(data) = (*layer);
info->setInputData(data);
inputsMap[(*layer)->name] = info;
}
@ -132,7 +132,7 @@ class GraphTestsBase : public ::testing::Test {
continue;
}
}
auto &inputMap = outData->getInputTo();
auto &inputMap = getInputTo(outData);
nForward +=
std::count_if(inputMap.begin(), inputMap.end(), [&](std::map<std::string, CNNLayerPtr>::value_type &vt) {
return vt.second->name == b;
@ -154,7 +154,7 @@ class GraphTestsBase : public ::testing::Test {
}
auto countRef = [&](DataWeakPtr wp) {
return wp.lock()->getCreatorLayer().lock()->name == a;
return getCreatorLayer(wp.lock()).lock()->name == a;
};
if (from_port_id == -1) {
@ -175,7 +175,7 @@ class GraphTestsBase : public ::testing::Test {
auto newData = std::make_shared<Data>(name, TensorDesc(Precision::FP32, SizeVector({ 1, 1 }), Layout::NC));
CNNLayerPtr newLayer = make_shared<GenericLayer>(LayerParams({name, "Generic_" + std::to_string(numCreated++), Precision::FP32}));
newData->getCreatorLayer() = newLayer;
getCreatorLayer(newData) = newLayer;
newLayer->outData.push_back(newData);
return newLayer;
@ -197,7 +197,7 @@ class GraphTestsBase : public ::testing::Test {
dims.push_back(batchSize);
data->setDims(dims);
for (auto output : (*layer)->outData) {
data->getInputTo() = output->getInputTo();
getInputTo(data) = getInputTo(output);
}
info->setInputData(data);
inputsMap[(*layer)->name] = info;
@ -213,7 +213,7 @@ class GraphTestsBase : public ::testing::Test {
for (auto layer = rhsLayers.begin(); layer != rhsLayers.end(); layer++) {
bool notLast = false;
for (auto && outData : (*layer)->outData) {
if (!outData->getInputTo().empty()) {
if (!getInputTo(outData).empty()) {
notLast = true;
break;
}
@ -238,7 +238,7 @@ class GraphTestsBase : public ::testing::Test {
for (int i = 0; i < 10; i++) {
layers.push_back(make_shared<CNNLayer>(LayerParams({std::to_string(i)}, "", Precision::UNSPECIFIED)));
datas[i].push_back(make_shared<Data>(std::to_string(i), Precision::FP32, Layout::NC));
datas[i].back()->getCreatorLayer() = layers[i];
getCreatorLayer(datas[i].back()) = layers[i];
SizeVector dims = datas[i].back()->getDims();
dims.push_back(_batchSize);
@ -253,7 +253,7 @@ class GraphTestsBase : public ::testing::Test {
// Reset shared_pointer circular dependencies to mitigate memory leaks.
for (auto& items : datas) {
for (auto& data : items) {
for (auto& input : data->getInputTo()) {
for (auto& input : getInputTo(data)) {
input.second.reset();
}
}
@ -277,7 +277,7 @@ class GraphTestsBase : public ::testing::Test {
* @param y input layer index
*/
void CONNECT(int x, int y) {
datas[x].front()->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x].front())[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[x].front());
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);
@ -286,7 +286,7 @@ class GraphTestsBase : public ::testing::Test {
void CONNECT_FROM_PORT(int x, int port, int y) {
if (datas[x].size() <= port) {
datas[x].push_back(make_shared<Data>(std::string("split_") + std::to_string(datas[x].size()), Precision::FP32, Layout::NC));
datas[x].back()->getCreatorLayer() = layers[x];
getCreatorLayer(datas[x].back()) = layers[x];
SizeVector dims = datas[x].back()->getDims();
dims.push_back(_batchSize);
@ -294,7 +294,7 @@ class GraphTestsBase : public ::testing::Test {
datas[x].back()->setDims(dims);
layers[x]->outData.push_back(datas[x].back());
}
datas[x][port]->getInputTo()[std::to_string(y)] = layers[y];
getInputTo(datas[x][port])[std::to_string(y)] = layers[y];
layers[y]->insData.push_back(datas[x][port]);
lhsLayers.insert(layers[x]);
rhsLayers.insert(layers[y]);

View File

@ -121,12 +121,12 @@ TEST_F(LayerTransformTest, injectioWillCopyOutData) {
fc->_out_num = 9;
auto data = std::make_shared<Data>("N1", Precision::FP32);
data->getCreatorLayer() = fc;
getCreatorLayer(data) = fc;
fc->outData.push_back(data);
auto layerWithData = injectData<SomeData>(fc, SomeData({11, "myname", 12.f}));
ASSERT_EQ(data->getCreatorLayer().lock(), layerWithData->outData[0]->getCreatorLayer().lock());
ASSERT_EQ(getCreatorLayer(data).lock(), getCreatorLayer(layerWithData->outData[0]).lock());
ASSERT_NE(data.get(), layerWithData->outData[0].get());
}
@ -137,7 +137,7 @@ TEST_F(LayerTransformTest, injectioWillCopyInputData) {
fc->_out_num = 9;
auto data = std::make_shared<Data>("N1", Precision::FP32);
data->getCreatorLayer() = fc;
getCreatorLayer(data) = fc;
fc->insData.push_back(data);
auto layerWithData = injectData<SomeData>(fc, SomeData({11, "myname", 12.f}));

View File

@ -249,9 +249,9 @@ TEST_F(RemoveLayerTests, canTrimL2) {
ASSERT_EQ(nullptr, net->getData("data7"));
net->removeData("data7");
ASSERT_EQ(net->allLayers().size(), originalLayersNum);
ASSERT_EQ(data2->getInputTo().size(), 1);
ASSERT_EQ(data2->getInputTo().find("layer1")->second, layer1);
ASSERT_EQ(data5->getCreatorLayer().lock(), newLayer);
ASSERT_EQ(getInputTo(data2).size(), 1);
ASSERT_EQ(getInputTo(data2).find("layer1")->second, layer1);
ASSERT_EQ(getCreatorLayer(data5).lock(), newLayer);
ASSERT_EQ(layer4->insData.size(), 2);
ASSERT_EQ(layer4->insData[1].lock(), data5);
ASSERT_EQ(layer1->insData.size(), 2);
@ -261,8 +261,8 @@ TEST_F(RemoveLayerTests, canTrimL2) {
ASSERT_EQ(layer1->outData[0], getData("data4"));
ASSERT_EQ(newLayer->outData.size(), 1);
ASSERT_EQ(newLayer->outData[0], data5);
ASSERT_EQ(data3->getInputTo().size(), 1);
ASSERT_EQ(data3->getInputTo().find("layer3")->second, getLayer("layer3"));
ASSERT_EQ(getInputTo(data3).size(), 1);
ASSERT_EQ(getInputTo(data3).find("layer3")->second, getLayer("layer3"));
}
TEST_F(RemoveLayerTests, canTrimI1andL1) {
@ -301,10 +301,10 @@ TEST_F(RemoveLayerTests, canTrimI1andL1) {
ASSERT_EQ(nullptr, net->getData("data1"));
net->removeData("data1");
ASSERT_EQ(net->allLayers().size(), originalLayersNum);
ASSERT_EQ(data2->getInputTo().size(), 1);
ASSERT_EQ(data2->getInputTo().find("layer2")->second, layer2);
ASSERT_EQ(newData4->getCreatorLayer().lock(), newLayerD4);
ASSERT_EQ(newData7->getCreatorLayer().lock(), newLayerD7);
ASSERT_EQ(getInputTo(data2).size(), 1);
ASSERT_EQ(getInputTo(data2).find("layer2")->second, layer2);
ASSERT_EQ(getCreatorLayer(newData4).lock(), newLayerD4);
ASSERT_EQ(getCreatorLayer(newData7).lock(), newLayerD7);
ASSERT_EQ(newLayerD4->outData.size(), 1);
ASSERT_EQ(newLayerD7->outData.size(), 1);
ASSERT_EQ(newLayerD4->outData[0], newData4);
@ -450,8 +450,8 @@ TEST_F(RemoveLayerTests, canTrimShapeInput2) {
auto data6 = net->getData("data6");
auto data2 = net->getData("data2");
ASSERT_EQ(data2->getInputTo().size(), 1);
ASSERT_EQ(data2->getInputTo().at(layer2->name), layer2);
ASSERT_EQ(getInputTo(data2).size(), 1);
ASSERT_EQ(getInputTo(data2).at(layer2->name), layer2);
ASSERT_EQ(net->allLayers().size(), originalLayersNum);
ASSERT_EQ(layer1->insData.size(), 1);
ASSERT_EQ(layer1->insData[0].lock(), getData("data1"));
@ -476,8 +476,8 @@ TEST_F(RemoveLayerTests, notTrimFirstConstInput) {
ASSERT_EQ(net->allLayers().size(), originalLayersNum);
IE::CNNNetwork cnnNetwork(net);
auto input4 = CommonTestUtils::getLayerByName(cnnNetwork, constLayer->name.c_str());
ASSERT_EQ(data10->getInputTo().size(), 1);
ASSERT_EQ(data10->getCreatorLayer().lock(), input4);
ASSERT_EQ(getInputTo(data10).size(), 1);
ASSERT_EQ(getCreatorLayer(data10).lock(), input4);
ASSERT_EQ(layer6->insData.size(), 2);
ASSERT_EQ(layer6->insData[0].lock(), data10);
ASSERT_EQ(layer6->insData[1].lock(), getData("data9"));
@ -499,9 +499,9 @@ TEST_F(RemoveLayerTests, canSaveConstForEltWise) {
ASSERT_EQ(net->allLayers().size(), 10);
ASSERT_EQ(layer1->insData.size(), 2);
ASSERT_EQ(layer1->insData[1].lock(), data2);
ASSERT_EQ(data2->getInputTo().size(), 2);
ASSERT_EQ(data2->getInputTo().at(layer1->name), layer1);
ASSERT_EQ(data2->getCreatorLayer().lock(), input2);
ASSERT_EQ(getInputTo(data2).size(), 2);
ASSERT_EQ(getInputTo(data2).at(layer1->name), layer1);
ASSERT_EQ(getCreatorLayer(data2).lock(), input2);
}
TEST_F(RemoveLayerTests, canSaveDataWithMultipleInputTo) {
@ -521,9 +521,9 @@ TEST_F(RemoveLayerTests, canSaveDataWithMultipleInputTo) {
ASSERT_EQ(layer2->insData.size(), 2);
ASSERT_EQ(layer2->insData[0].lock(), getData("data2"));
ASSERT_EQ(layer2->insData[1].lock(), getData("data7"));
ASSERT_EQ(data3->getInputTo().size(), 1);
ASSERT_EQ(data3->getInputTo().at(layer3->name), layer3);
ASSERT_EQ(data3->getCreatorLayer().lock(), input3);
ASSERT_EQ(getInputTo(data3).size(), 1);
ASSERT_EQ(getInputTo(data3).at(layer3->name), layer3);
ASSERT_EQ(getCreatorLayer(data3).lock(), input3);
ASSERT_EQ(layer3->insData.size(), 1);
ASSERT_EQ(layer3->insData[0].lock(), data3);
}

View File

@ -60,7 +60,7 @@ public:
auto data = _data[dataName];
nextlayer->insData.push_back(data);
data->getInputTo().insert({nextlayerName, nextlayer});
getInputTo(data).insert({nextlayerName, nextlayer});
return *this;
}
@ -71,10 +71,10 @@ public:
auto prevlayer = _layers[prevlayerName];
auto data = _data[dataName];
assert(nullptr == data->getCreatorLayer().lock());
assert(nullptr == getCreatorLayer(data).lock());
prevlayer->outData.push_back(data);
data->getCreatorLayer() = prevlayer;
getCreatorLayer(data) = prevlayer;
return *this;
}
@ -111,7 +111,7 @@ public:
for (auto&& it: _data) {
auto& data = it.second;
net->getData(it.first) = data;
if (nullptr == data->getCreatorLayer().lock()) {
if (nullptr == getCreatorLayer(data).lock()) {
auto input = std::make_shared<IE::InputInfo>();
input->setInputData(data);
net->setInputInfo(input);