Remove deprecated methods from node (#1369)

Ilya Churaev 2020-07-23 14:44:32 +03:00 committed by GitHub
parent cdd5605c61
commit 82aa1e112d
13 changed files with 55 additions and 120 deletions
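
In short: the change deletes the deprecated descriptor-based accessors from ngraph::Node (get_inputs(), get_outputs(), get_output_tensor(), and both get_output_tensor_ptr() overloads) and ports every remaining caller, from the Inference Engine nGraph integration and common transformations to the VPU front end and the core ops, to the port-based API: inputs()/outputs(), input(i)/output(i), input_value(i), get_source_output(), and get_target_inputs(). In the hunks below, lines prefixed with - were removed and lines prefixed with + were added.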


@@ -352,11 +352,11 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
    std::unordered_set<std::string> opName;
    for (const auto & layer : specialized_ngraph_function->get_ordered_ops()) {
        if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
-            IE_ASSERT(layer->get_inputs().size() == 1);
-            const auto& input = layer->get_inputs()[0];
-            std::string outName = input.get_output().get_node()->get_friendly_name();
-            if (input.get_output().get_node()->get_output_size() != 1)
-                outName += "." + std::to_string(input.get_output().get_index());
+            IE_ASSERT(layer->inputs().size() == 1);
+            const auto& output = layer->input(0).get_source_output();
+            std::string outName = output.get_node()->get_friendly_name();
+            if (output.get_node()->get_output_size() != 1)
+                outName += "." + std::to_string(output.get_index());
            addOutput(outName);
            continue;
        }
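
This first hunk sets the pattern for the whole commit: the deprecated descriptor chain layer->get_inputs()[i].get_output() becomes the port call layer->input(i).get_source_output(). As a hedged illustration (the helper name and the NGRAPH_CHECK are mine, not from the diff), the new-API side condenses to:

#include <ngraph/check.hpp>
#include <ngraph/node.hpp>
#include <string>

// Sketch only: resolve the friendly name of the tensor feeding a Result node,
// using the non-deprecated Input/Output port API.
std::string producer_name(const std::shared_ptr<ngraph::Node>& result) {
    NGRAPH_CHECK(result->inputs().size() == 1, "Result nodes have exactly one input");
    const ngraph::Output<ngraph::Node> output = result->input(0).get_source_output();
    std::string name = output.get_node()->get_friendly_name();
    // Multi-output producers are disambiguated with a ".<port index>" suffix.
    if (output.get_node()->get_output_size() != 1)
        name += "." + std::to_string(output.get_index());
    return name;
}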


@@ -325,7 +325,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
            Builder::NodeConverter<::ngraph::op::Constant> converter;
-            const auto weightsNode = castedLayer->get_inputs()[1].get_output().get_node();
+            const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
            if (converter.canCreate(weightsNode)) {
                const auto& weights = converter.createLayer(weightsNode);
                res->blobs["weights"] = weights->blobs["custom"];

@@ -351,7 +351,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
        res->params = params;
        return res;
    });

    addSpecificCreator({"Assign"}, [](const std::shared_ptr<::ngraph::Node>& node,
                                      const std::map<std::string, std::string> params) -> CNNLayerPtr {
        LayerParams attrs = {node->get_friendly_name(), "Memory",

@@ -706,8 +706,8 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
                              const std::unordered_set<std::string> &names,
                              bool keep_constant) -> bool {
        if (auto constantNode = ::ngraph::as_type_ptr<::ngraph::op::Constant>(node)) {
-            for (const auto &consumerInputPort : constantNode->get_outputs()[0].get_inputs()) {
-                const auto &consumerLayer = consumerInputPort->get_node();
+            for (const auto &consumerInputPort : constantNode->output(0).get_target_inputs()) {
+                const auto &consumerLayer = consumerInputPort.get_node()->shared_from_this();
                if (names.find(consumerLayer->get_name()) == names.end())
                    continue;
                if (!isInternalConstLayer(constantNode, consumerLayer, keep_constant))

@@ -779,7 +779,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
        size_t inputCount(0);
        for (size_t i = 0; i < layer->get_input_size(); i++) {
-            const auto &constant = ngraph::as_type_ptr<ngraph::op::Constant>(layer->get_inputs()[i].get_output().get_node());
+            const auto &constant = ngraph::as_type_ptr<ngraph::op::Constant>(layer->input(i).get_source_output().get_node_shared_ptr());
            if (constant && isInternalConstLayer(constant, layer, keep_constants)) {
                continue;
            }

@@ -855,7 +855,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
        if (std::dynamic_pointer_cast<::ngraph::op::ReadValue>(layer))
            continue;
        if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
-            IE_ASSERT(layer->get_inputs().size() == 1);
+            IE_ASSERT(layer->inputs().size() == 1);
            const auto &input = layer->input_value(0);
            std::string outName = input.get_node_shared_ptr()->get_friendly_name();
            if (input.get_node_shared_ptr()->get_output_size() != 1)
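
Consumer traversal flips direction with the same API: node->get_outputs()[0].get_inputs() (a deque of descriptor::Input*) becomes node->output(0).get_target_inputs() (a std::set of Input<Node>, returned by value). A minimal sketch of the idiom, assuming a single-output constant as in the hunk above:

#include <ngraph/node.hpp>

// Sketch only: visit every consumer of a constant's single output.
void visit_consumers(const std::shared_ptr<ngraph::Node>& constant) {
    for (const ngraph::Input<ngraph::Node>& consumer : constant->output(0).get_target_inputs()) {
        // Input<Node>::get_node() returns a raw Node*; shared_from_this()
        // recovers a shared_ptr where the calling code expects one.
        auto consumerLayer = consumer.get_node()->shared_from_this();
        (void)consumerLayer;  // e.g. filter by consumerLayer->get_name()
    }
}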


@@ -261,7 +261,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
    // Port map: outputs
    for (const auto& desc : tensor_iterator->get_output_descriptions()) {
-        auto result = results[desc->m_body_value_index]->inputs()[0].get_source_output();
+        auto result = results[desc->m_body_value_index]->input(0).get_source_output();

        // GetOutputElement layer can be inserted by ngraph deep copy functions
        // (e.g. specialize_function, clone_function)

@@ -785,7 +785,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ConvolutionIE>::createLayer(
        res->_weights = weights->blobs["custom"];
    if (castedLayer->inputs().size() == 3) {
-        const auto biasNode = castedLayer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr();
        if (converter.canCreate(biasNode)) {
            const auto& bias = converter.createLayer(biasNode);
            res->blobs["biases"] = bias->blobs["custom"];

@@ -853,7 +853,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::DeconvolutionIE>::createLayer(
        res->_weights = weights->blobs["custom"];
    if (castedLayer->inputs().size() == 3) {
-        const auto biasNode = castedLayer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr();
        if (converter.canCreate(biasNode)) {
            const auto& bias = converter.createLayer(biasNode);
            res->blobs["biases"] = bias->blobs["custom"];

@@ -1137,7 +1137,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::PRelu>::createLayer(const std::shared_pt
    auto castedLayer = ngraph::as_type_ptr<ngraph::op::PRelu>(layer);
    if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-    const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
+    const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
    if (auto const_weights = ngraph::as_type_ptr<ngraph::op::Constant>(weightsNode)) {
        SizeVector dataShape = const_weights->get_shape();
        if (dataShape.size() >= 2 && ngraph::shape_size(dataShape) == dataShape[1]) {

@@ -1282,7 +1282,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::v1::Reshape>::createLayer(const std::sha
        THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-    const auto constNode = castedLayer->get_inputs()[1].get_output().get_node();
+    const auto constNode = castedLayer->input_value(1).get_node_shared_ptr();
    if (auto constValue = ngraph::as_type_ptr<ngraph::op::Constant>(constNode)) {
        auto value = constValue->cast_vector<int64_t>();
        for (auto & i : value) {

@@ -1344,14 +1344,14 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ScaleShiftIE>::createLayer(const std::sh
    auto res = std::make_shared<InferenceEngine::ScaleShiftLayer>(params);
    NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = layer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
    if (converter.canCreate(weightsNode)) {
        const auto& weightsLayer = converter.createLayer(weightsNode);
        res->blobs["weights"] = weightsLayer->blobs["custom"];
        res->_weights = weightsLayer->blobs["custom"];
    }
-    const auto biasNode = layer->get_inputs()[2].get_output().get_node();
+    const auto biasNode = layer->input_value(2).get_node_shared_ptr();
    if (converter.canCreate(biasNode)) {
        const auto& bias = converter.createLayer(biasNode);
        res->blobs["biases"] = bias->blobs["custom"];

@@ -1641,13 +1641,13 @@ CNNLayer::Ptr NodeConverter<ngraph::op::FullyConnected>::createLayer(const std::
    NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = layer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
    if (!keep_constants && converter.canCreate(weightsNode)) {
        const auto& weights = converter.createLayer(weightsNode);
        res->blobs["weights"] = weights->blobs["custom"];
        res->_weights = weights->blobs["custom"];
-        const auto biasNode = layer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = layer->input_value(2).get_node_shared_ptr();
        if (converter.canCreate(biasNode)) {
            const auto& bias = converter.createLayer(biasNode);
            res->blobs["biases"] = bias->blobs["custom"];

@@ -1694,14 +1694,14 @@ CNNLayer::Ptr NodeConverter<ngraph::op::LSTMCellIE>::createLayer(const std::shar
    res->params["clip"] = asString(castedLayer->get_clip());
    NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = layer->get_inputs()[3].get_output().get_node();
+    const auto weightsNode = layer->input_value(3).get_node_shared_ptr();
    if (converter.canCreate(weightsNode)) {
        const auto& weights = converter.createLayer(weightsNode);
        res->blobs["weights"] = weights->blobs["custom"];
        res->_weights = weights->blobs["custom"];
    }
-    const auto biasNode = layer->get_inputs()[4].get_output().get_node();
+    const auto biasNode = layer->input_value(4).get_node_shared_ptr();
    if (converter.canCreate(biasNode)) {
        const auto& bias = converter.createLayer(biasNode);
        res->blobs["biases"] = bias->blobs["custom"];

@@ -1862,7 +1862,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::NormalizeIE>::createLayer(const std::sha
    res->params["across_spatial"] = castedLayer->get_across_spatial() ? "1" : "0";
    NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = castedLayer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
    if (converter.canCreate(weightsNode)) {
        const auto& weights = converter.createLayer(weightsNode);
        res->blobs["weights"] = weights->blobs["custom"];
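
node->input_value(i) is shorthand for node->input(i).get_source_output(), which is why the weight and bias fetches above collapse to a single call. A hedged sketch (hypothetical helper; converter plumbing omitted):

#include <ngraph/node.hpp>
#include <ngraph/op/constant.hpp>

// Sketch only: fetch a node's i-th operand as a Constant, or nullptr if the
// producer is not constant.
std::shared_ptr<ngraph::op::Constant>
constant_operand(const std::shared_ptr<ngraph::Node>& node, size_t i) {
    // input_value(i) is equivalent to input(i).get_source_output().
    return ngraph::as_type_ptr<ngraph::op::Constant>(
        node->input_value(i).get_node_shared_ptr());
}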


@@ -24,8 +24,8 @@ ngraph::graph_rewrite_callback callback = [](ngraph::pattern::Matcher& m) {
        return false;
    }
-    for (const auto& input : eltwise_node->get_inputs()) {
-        const auto& inputLayer = input.get_output().get_node();
+    for (const auto& input : eltwise_node->inputs()) {
+        const auto& inputLayer = input.get_source_output().get_node_shared_ptr();
        auto const_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(inputLayer);
        if (!const_node) continue;
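
The loop form carries over almost verbatim; only the element type changes, from descriptor::Input references to Input<Node> port handles. A sketch of the same constant-scan written against the new API (helper name is illustrative):

#include <ngraph/node.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <vector>

// Sketch only: collect every constant operand of a node.
std::vector<std::shared_ptr<ngraph::opset1::Constant>>
constant_operands(const std::shared_ptr<ngraph::Node>& node) {
    std::vector<std::shared_ptr<ngraph::opset1::Constant>> result;
    for (const ngraph::Input<ngraph::Node>& input : node->inputs()) {
        auto producer = input.get_source_output().get_node_shared_ptr();
        if (auto constant = std::dynamic_pointer_cast<ngraph::opset1::Constant>(producer))
            result.push_back(constant);
    }
    return result;
}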


@@ -28,9 +28,9 @@ void ngraph::pass::ConvertOneHotToOneHotIE::convert_one_hot() {
        element::Type output_type = is_f16 ? element::f16 : element::f32;

-        const auto depth_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[1].get_output().get_node());
-        const auto on_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[2].get_output().get_node());
-        const auto off_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[3].get_output().get_node());
+        const auto depth_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(1).get_source_output().get_node_shared_ptr());
+        const auto on_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(2).get_source_output().get_node_shared_ptr());
+        const auto off_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(3).get_source_output().get_node_shared_ptr());

        // can be converted iff inputs with depth, on/off values are constants
        if (depth_node == nullptr || on_value_node == nullptr || off_value_node == nullptr) return false;


@@ -142,10 +142,10 @@ void FrontEnd::detectNetworkBatch(
        VPU_THROW_FORMAT("Unsupported layer %s configuration: no outputs", layer->get_name());

    // 1. Don't support if DetectionOutput is not the last layer in network
-    for (const auto& outputHandle : layer->get_outputs()) {
-        for (const auto& inputHandle : outputHandle.get_inputs()) {
-            auto outNode = inputHandle->get_node();
-            if (std::dynamic_pointer_cast<::ngraph::opset3::Result>(outNode)) {
+    for (const auto& outputHandle : layer->outputs()) {
+        for (const auto& inputHandle : outputHandle.get_target_inputs()) {
+            auto outNode = inputHandle.get_node();
+            if (dynamic_cast<::ngraph::opset3::Result *>(outNode)) {
                continue;
            }
            VPU_THROW_FORMAT("Unsupported layer %s configuration : it is not a network output", layer->get_name());
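
Note the cast swap in this hunk: Input<Node>::get_node() returns a raw Node* rather than a shared_ptr, so std::dynamic_pointer_cast gives way to a plain dynamic_cast. A hedged sketch of the underlying check (hypothetical helper name):

#include <ngraph/node.hpp>
#include <ngraph/opsets/opset3.hpp>

// Sketch only: true when every consumer of every output is a Result node,
// i.e. the node feeds nothing but network outputs.
bool feeds_only_results(const std::shared_ptr<ngraph::Node>& node) {
    for (const ngraph::Output<ngraph::Node>& output : node->outputs()) {
        for (const ngraph::Input<ngraph::Node>& consumer : output.get_target_inputs()) {
            // get_node() yields Node*, hence dynamic_cast, not dynamic_pointer_cast.
            if (!dynamic_cast<::ngraph::opset3::Result*>(consumer.get_node()))
                return false;
        }
    }
    return true;
}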


@@ -91,7 +91,9 @@ namespace FuncTestUtils {
    if (layer->blobs.size() != refLayer->blobs.size()) {
        err_log.push_back(
-            "Layer " + layer->name + " and ref layer " + refLayer->name + " have different number of blobs: " +
+            "Layer " + layer->type + " with name " + layer->name +
+            " and ref layer " + layer->type + " with name " + refLayer->name +
+            " have different number of blobs: " +
            std::to_string(layer->blobs.size()) + " and " + std::to_string(refLayer->blobs.size()));
    }


@@ -172,7 +172,7 @@ void Node::set_arguments(const OutputVector& arguments)
    for (auto& output : arguments)
    {
        auto output_node = output.get_node();
-        auto& output_descriptor = output_node->get_outputs().at(output.get_index());
+        auto& output_descriptor = output_node->m_outputs.at(output.get_index());
        m_inputs.emplace_back(this, i++, output_descriptor);
    }
}

@@ -262,16 +262,6 @@ void Node::set_output_type(size_t i, const element::Type& element_type, const Pa
    get_output_descriptor(i).get_tensor_ptr()->set_tensor_type(element_type, pshape);
}

-std::deque<descriptor::Output>& Node::get_outputs()
-{
-    return m_outputs;
-}
-
-const std::deque<descriptor::Output>& Node::get_outputs() const
-{
-    return m_outputs;
-}

const std::string& Node::description() const
{
    // Terrible transitional kludge to keep description working while we change

@@ -638,23 +628,6 @@ const Shape& Node::get_shape() const
    return get_output_shape(0);
}

-shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr(size_t i) const
-{
-    NGRAPH_CHECK(
-        i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_ptr(size_t i)");
-    return m_outputs[i].get_tensor_ptr();
-}
-
-shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr() const
-{
-    if (get_output_size() != 1)
-    {
-        throw ngraph_error(
-            "get_output_tensor_ptr() must be called on a node with exactly one output.");
-    }
-    return m_outputs[0].get_tensor_ptr();
-}

std::set<Input<Node>> Node::get_output_target_inputs(size_t i) const
{
    std::set<Input<Node>> result;

@@ -688,15 +661,6 @@ const string& Node::get_output_tensor_name(size_t i) const
    return m_outputs[i].get_tensor().get_name();
}

-descriptor::Tensor& Node::get_output_tensor() const
-{
-    if (get_output_size() != 1)
-    {
-        throw ngraph_error("get_output_tensor() must be called on a node with exactly one output.");
-    }
-    return get_output_tensor(0);
-}

size_t Node::get_input_size() const
{
    return m_inputs.size();


@@ -268,19 +268,6 @@ namespace ngraph
        /// \returns The stream os
        virtual std::ostream& write_description(std::ostream& os, uint32_t depth = 0) const;

-        std::deque<descriptor::Input>& get_inputs() NGRAPH_DEPRECATED("use inputs() instead")
-        {
-            return m_inputs;
-        }
-
-        const std::deque<descriptor::Input>& get_inputs() const
-            NGRAPH_DEPRECATED("use inputs() instead")
-        {
-            return m_inputs;
-        }
-
-        std::deque<descriptor::Output>& get_outputs() NGRAPH_DEPRECATED("use outputs() instead");
-        const std::deque<descriptor::Output>& get_outputs() const
-            NGRAPH_DEPRECATED("use outputs() instead");

        /// Get control dependencies registered on the node
        const std::vector<std::shared_ptr<Node>>& get_control_dependencies() const;

@@ -354,22 +341,6 @@ namespace ngraph
        /// Returns the tensor name for output i
        const std::string& get_output_tensor_name(size_t i) const;

-        /// Checks that there is exactly one output and returns its tensor.
-        descriptor::Tensor& get_output_tensor() const NGRAPH_DEPRECATED(
-            "use node->get_output_tensor(0) instead; insert a check that the node has only one "
-            "output, or update calling code not to assume only one output");
-
-        /// Returns the tensor of output i
-        // TODO: Investigate whether this really needs to be shared_ptr. If so, we'll need a
-        // replacement in Output.
-        std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr(size_t i) const
-            NGRAPH_DEPRECATED("use &node->output(i).get_tensor() instead");
-
-        /// Checks that there is exactly one output and returns its tensor.
-        std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr() const NGRAPH_DEPRECATED(
-            "use &node->output(i).get_tensor() instead; insert a check that the node has only one "
-            "output, or update calling code not to assume only one output");

        std::set<Input<Node>> get_output_target_inputs(size_t i) const;
        /// Returns the number of inputs for the op
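
The deprecation strings above double as a migration table. A hedged sketch of the surviving spellings for single-output code (helper name is mine):

#include <ngraph/check.hpp>
#include <ngraph/node.hpp>

// Sketch only: non-deprecated replacements for the removed accessors, for a
// node already known to have exactly one output.
void tensor_accessors(const std::shared_ptr<ngraph::Node>& node) {
    NGRAPH_CHECK(node->get_output_size() == 1, "expected a single-output node");
    // was: node->get_output_tensor()
    ngraph::descriptor::Tensor& tensor = node->get_output_tensor(0);
    // was: node->get_output_tensor_ptr(i) / get_output_tensor_ptr(); note the
    // replacement is a raw pointer; per the TODO above, a shared_ptr
    // replacement in Output was still an open question.
    ngraph::descriptor::Tensor* tensor_ptr = &node->output(0).get_tensor();
    (void)tensor;
    (void)tensor_ptr;
}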


@@ -215,7 +215,7 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& dat
bool op::v1::ConvolutionBackpropData::is_dynamic() const
{
    bool is_dynamic = Node::is_dynamic();
-    if (get_inputs().size() == 3 && !is_dynamic)
+    if (inputs().size() == 3 && !is_dynamic)
    {
        return !is_type<op::Constant>(input_value(2).get_node());
    }

@@ -235,7 +235,7 @@ const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const
    {
        shape = PartialShape{vector<Dimension>(m_strides.size())};
    }
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
    if (is_output_shape_present)
    {
        if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))

@@ -297,7 +297,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types()
    const PartialShape& filters_pshape = get_input_partial_shape(1);
    element::Type filters_et = get_input_element_type(1);

-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
    PartialShape output_pshape = get_output_shape();

    element::Type result_et;
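
inputs().size() is a drop-in replacement for get_inputs().size() in these guards: both count input ports, and port 2 here is the optional output_shape operand. A sketch of the guard condition (hypothetical free function):

#include <ngraph/node.hpp>
#include <ngraph/op/constant.hpp>

// Sketch only: does this backprop-data op carry a constant output_shape input?
bool has_constant_output_shape(const std::shared_ptr<ngraph::Node>& op) {
    return op->inputs().size() == 3 &&
           ngraph::is_type<ngraph::op::Constant>(op->input_value(2).get_node());
}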


@@ -265,7 +265,7 @@ bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVis
bool op::v1::GroupConvolutionBackpropData::is_dynamic() const
{
    bool is_dynamic = Node::is_dynamic();
-    if (get_inputs().size() == 3 && !is_dynamic)
+    if (inputs().size() == 3 && !is_dynamic)
    {
        return !is_type<op::Constant>(input_value(2).get_node());
    }

@@ -285,7 +285,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_
    {
        shape = PartialShape{vector<Dimension>(m_strides.size())};
    }
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
    if (is_output_shape_present)
    {
        if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))

@@ -411,7 +411,7 @@ void op::v1::GroupConvolutionBackpropData::pre_validate_and_infer_types()
            "spatial features.");
    }

-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
    PartialShape output_pshape;

    // If output shape is provided, ignore current values for padding begin/end


@@ -116,7 +116,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                          "Expected a 3D tensor for the 'scores' input. Got: ",
                          scores_ps);

-    if (get_inputs().size() >= 3)
+    if (inputs().size() >= 3)
    {
        const auto max_boxes_ps = get_input_partial_shape(2);
        NODE_VALIDATION_CHECK(this,

@@ -125,7 +125,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                              max_boxes_ps);
    }

-    if (get_inputs().size() >= 4)
+    if (inputs().size() >= 4)
    {
        const auto iou_threshold_ps = get_input_partial_shape(3);
        NODE_VALIDATION_CHECK(this,

@@ -135,7 +135,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                              iou_threshold_ps);
    }

-    if (get_inputs().size() >= 5)
+    if (inputs().size() >= 5)
    {
        const auto score_threshold_ps = get_input_partial_shape(4);
        NODE_VALIDATION_CHECK(this,

@@ -315,7 +315,7 @@ void op::v3::NonMaxSuppression::validate()
                          "Expected a 3D tensor for the 'scores' input. Got: ",
                          scores_ps);

-    if (get_inputs().size() >= 3)
+    if (inputs().size() >= 3)
    {
        const auto max_boxes_ps = get_input_partial_shape(2);
        NODE_VALIDATION_CHECK(this,

@@ -324,7 +324,7 @@ void op::v3::NonMaxSuppression::validate()
                              max_boxes_ps);
    }

-    if (get_inputs().size() >= 4)
+    if (inputs().size() >= 4)
    {
        const auto iou_threshold_ps = get_input_partial_shape(3);
        NODE_VALIDATION_CHECK(this,

@@ -334,7 +334,7 @@ void op::v3::NonMaxSuppression::validate()
                              iou_threshold_ps);
    }

-    if (get_inputs().size() >= 5)
+    if (inputs().size() >= 5)
    {
        const auto score_threshold_ps = get_input_partial_shape(4);
        NODE_VALIDATION_CHECK(this,


@@ -68,31 +68,29 @@ bool pass::FusedOpDecomposition::run_on_node(shared_ptr<Node> node)
        size_t i = 0;
        for (auto output_node : subgraph_outputs)
        {
-            for (size_t j = 0; j < output_node->get_outputs().size(); j++, i++)
+            for (size_t j = 0; j < output_node->outputs().size(); j++, i++)
            {
-                set<descriptor::Input*> fop_users{begin(node->get_outputs().at(i).get_inputs()),
-                                                  end(node->get_outputs().at(i).get_inputs())};
+                std::set<Input<Node>> fop_users = node->outputs().at(i).get_target_inputs();
                for (auto fop_user : fop_users)
                {
-                    if (auto goe = as_type<op::GetOutputElement>(fop_user->get_raw_pointer_node()))
+                    if (auto goe = as_type<op::GetOutputElement>(fop_user.get_node()))
                    {
                        Output<Node> goe_output = goe->get_as_output();
                        if (goe_output.get_index() == i &&
                            !goe->output(0).get_target_inputs().empty())
                        {
                            // Replace GOE users
-                            set<descriptor::Input*> goe_users{
-                                begin(goe->get_outputs().at(0).get_inputs()),
-                                end(goe->get_outputs().at(0).get_inputs())};
+                            std::set<Input<Node>> goe_users =
+                                goe->outputs().at(0).get_target_inputs();
                            for (auto goe_user : goe_users)
                            {
-                                goe_user->replace_output(output_node->get_outputs().at(j));
+                                goe_user.replace_source_output(output_node->output(j));
                            }
                        }
                    }
                    else
                    {
-                        fop_user->replace_output(output_node->get_outputs().at(j));
+                        fop_user.replace_source_output(output_node->output(j));
                    }
                }
            }
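
Input<Node>::replace_source_output is the port-API successor to descriptor::Input::replace_output: it rewires a single consumer edge to read from a different producer. A minimal sketch (hypothetical helper), mirroring how the pass snapshots the consumer set before mutating it:

#include <ngraph/node.hpp>
#include <set>

// Sketch only: rewire every consumer of `from` so it reads `to` instead.
void redirect_consumers(const ngraph::Output<ngraph::Node>& from,
                        const ngraph::Output<ngraph::Node>& to) {
    // Copy first: replace_source_output() mutates the target-input set.
    const std::set<ngraph::Input<ngraph::Node>> consumers = from.get_target_inputs();
    for (ngraph::Input<ngraph::Node> consumer : consumers) {
        consumer.replace_source_output(to);
    }
}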