diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index a8208392fa2..1fa13c7600b 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -668,8 +668,8 @@ std::shared_ptr<ngraph::Node>
 V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &inputs, const pugi::xml_node &node,
                                                std::istream &binStream,
                                                const V10Parser::GenericLayerParams &layerParsePrms,
-                                               std::shared_ptr<ngraph::op::util::SubGraphOp> tensor_iterator) {
-    tensor_iterator->set_friendly_name(GetStrAttr(node, "name"));
+                                               std::shared_ptr<ngraph::op::util::SubGraphOp> subgraph_op) {
+    subgraph_op->set_friendly_name(GetStrAttr(node, "name"));
     auto body_node = node.child("body");

     if (body_node.empty()) {
@@ -696,12 +696,12 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
     // Disabled reshape for generic operations in the TI body
     ::ngraph::op::GenericIE::DisableReshape noReshape(ngraph_function);
     auto body = std::make_shared<ngraph::Function>(result_nodes, parameter_nodes);
-    tensor_iterator->set_function(body);
+    subgraph_op->set_function(body);

     // Parse PortMap: inputs
     std::map<int64_t, pugi::xml_node> input_map;
     FOREACH_CHILD(_input, node.child("port_map"), "input") {
-        int64_t ext_port_id = GetUIntAttr(_input, "external_port_id");
+        int64_t ext_port_id = GetInt64Attr(_input, "external_port_id");
         input_map[ext_port_id] = _input;
     }

@@ -709,7 +709,8 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
     for (const auto& input : input_map) {
         auto &_input = input.second;
         auto axis_attr = _input.attribute("axis");
-        size_t ti_input_index = GetUIntAttr(_input, "external_port_id");
+        auto purpose = GetStrAttr(_input, "purpose", "");
+        int64_t ti_input_index = GetInt64Attr(_input, "external_port_id");
         size_t body_parameter_index = GetUIntAttr(_input, "internal_layer_id");

         auto body_param = std::find_if(parameter_nodes.begin(), parameter_nodes.end(),
@@ -721,7 +722,8 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
             THROW_IE_EXCEPTION << "PortMap input parsing error. Body parameter with id = " << body_parameter_index
                                << " not found.";
         }
-        if (ti_input_index >= inputs.size())
+
+        if (ti_input_index >= static_cast<int64_t>(inputs.size()))
             THROW_IE_EXCEPTION << "TensorIterator " << layerParsePrms.name << " has incorrect number of inputs!";

         // if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
@@ -731,7 +733,7 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
             int64_t stride = GetInt64Attr(_input, "stride", 1);
             int64_t end = GetInt64Attr(_input, "end", -1);
             int64_t part_size = GetInt64Attr(_input, "part_size", 1);
-            tensor_iterator->set_sliced_input(*body_param, inputs[ti_input_index], start, stride, part_size, end, axis);
+            subgraph_op->set_sliced_input(*body_param, inputs.at(ti_input_index), start, stride, part_size, end, axis);
             is_sliced_input_exists = true;
         } else {
             // otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
@@ -752,28 +754,40 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
                                            << " not found.";
                     }

-                    tensor_iterator->set_merged_input(*body_param, inputs[ti_input_index], *body_result);
+                    subgraph_op->set_merged_input(*body_param, inputs.at(ti_input_index), *body_result);
                     is_back_edge_exist = true;
                     break;
                 }
             }

-            if (!is_back_edge_exist) {
-                tensor_iterator->set_invariant_input(*body_param, inputs[ti_input_index]);
+            // ti_input_index = -1 means that Parameter of the body is not connected to inputs of TensorIterator
+            // and is used only for internal needs.
+            if (!is_back_edge_exist && ti_input_index >= 0) {
+                subgraph_op->set_invariant_input(*body_param, inputs.at(ti_input_index));
+            }
+
+            if (purpose == "current_iteration") {
+                auto loop = std::dynamic_pointer_cast<ngraph::opset5::Loop>(subgraph_op);
+                if (!loop)
+                    THROW_IE_EXCEPTION << "PortMap output parsing error. Purpose attribute is available only for Loop operation.";
+                loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{ngraph_function->get_parameter_index(*body_param),
+                                                                                    -1});
             }
         }
     }

     // Parse PortMap: outputs
-    std::map<uint64_t, pugi::xml_node> output_map;
+    std::map<int64_t, pugi::xml_node> output_map;
     FOREACH_CHILD(_output, node.child("port_map"), "output") {
-        uint32_t ext_port_id = GetUIntAttr(_output, "external_port_id");
+        int64_t ext_port_id = GetInt64Attr(_output, "external_port_id");
         output_map[ext_port_id] = _output;
     }

+    int i = 0;
     for (const auto& output : output_map) {
         auto& _output = output.second;
         auto axis_attr = _output.attribute("axis");
+        auto purpose = GetStrAttr(_output, "purpose", "");
         size_t body_result_index = GetUIntAttr(_output, "internal_layer_id");

         auto body_result =
@@ -788,25 +802,38 @@ V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &input
         // if axis is set, then concatenation is enabled. Create ngraph::TensorIterator::ConcatOutput.
         if (!axis_attr.empty()) {
-            uint32_t axis = GetUIntAttr(_output, "axis");
+            int64_t axis = GetInt64Attr(_output, "axis");
             int64_t start = GetInt64Attr(_output, "start", 0);
             int64_t stride = GetInt64Attr(_output, "stride", 1);
             int64_t end = GetInt64Attr(_output, "end", -1);
             int64_t part_size = GetInt64Attr(_output, "part_size", 1);
-            tensor_iterator->get_concatenated_slices(*body_result, start, stride, part_size, end, axis);
+            subgraph_op->get_concatenated_slices(*body_result, start, stride, part_size, end, axis);

             if (!is_sliced_input_exists) {
-                if (auto ti = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(tensor_iterator))
-                    ti->set_num_iterations((std::abs(end - start)) / part_size);
+                if (auto ti = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(subgraph_op))
+                    // for Loop op we just skip this call
+                    if (ti)
+                        ti->set_num_iterations((std::abs(end - start)) / part_size);
+            }
+        } else if (purpose == "execution_condition") {
+            auto loop = std::dynamic_pointer_cast<ngraph::opset5::Loop>(subgraph_op);
+            if (!loop)
+                THROW_IE_EXCEPTION << "PortMap output parsing error. Purpose attribute is available only for Loop operation.";
+            loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{loop->get_special_body_ports().current_iteration_input_idx,
+                                                                                ngraph_function->get_result_index(*body_result)});
+            // if external_port_id < 0,
+            // it means that this body result isn't connected to the Loop output and is used only for internal needs.
+            if (output.first >= 0) {
+                subgraph_op->get_iter_value(*body_result, -1);
+            }
         } else {
             // otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
-            tensor_iterator->get_iter_value(*body_result, -1);
+            subgraph_op->get_iter_value(*body_result, -1);
         }
     }

-    tensor_iterator->validate_and_infer_types();
-    return tensor_iterator;
+    subgraph_op->validate_and_infer_types();
+    return subgraph_op;
 }
@@ -824,7 +851,7 @@ template <>
 std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::opset5::Loop>::createLayer(
     const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
     const GenericLayerParams& layerParsePrms) {
-    auto loop = std::make_shared<ngraph::opset5::Loop>();
+    auto loop = std::make_shared<ngraph::opset5::Loop>(inputs[0], inputs[1]);
     return fillSubGraphLayer(inputs, node, binStream, layerParsePrms, loop);
 }
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
index b792d6b35d7..4c70de5883d 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
@@ -106,7 +106,7 @@ private:
     static std::shared_ptr<ngraph::Node> fillSubGraphLayer(const ngraph::OutputVector& inputs, const pugi::xml_node& node,
                                                            std::istream& binStream,
                                                            const GenericLayerParams& layerParsePrms,
-                                                           std::shared_ptr<ngraph::op::util::SubGraphOp> sub_graph_node);
+                                                           std::shared_ptr<ngraph::op::util::SubGraphOp> subgraph_op);
     explicit LayerBaseCreator(const std::string& type): type(type) {}
     std::string getType() {
         return type;
diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/loop.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/loop.cpp
new file mode 100644
index 00000000000..07395cf60ea
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/loop.cpp
@@ -0,0 +1,383 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <string>
+#include "ngraph_reader_tests.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+TEST_F(NGraphReaderTests, ReadLoopNetwork) {
+    std::string model = R"V0G0N(
+<!-- IR v10 <net> containing a Loop layer with its nested <body> and <port_map> (full XML body not preserved in this extract) -->
+)V0G0N";
+
+    Core reader;
+    Blob::Ptr weights;
+    weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {8}, Layout::C));
+    weights->allocate();
+    weights->buffer().as<int64_t *>()[0] = 0;
+    EXPECT_NO_THROW(reader.ReadNetwork(model, weights));
+}
+
+TEST_F(NGraphReaderTests, ReadLoopNetwork_2) {
+    std::string model = R"V0G0N(
+<!-- IR v10 <net> containing a Loop layer whose body works on [1,2] tensors and whose output has shape [3,1,2] (full XML body not preserved in this extract) -->
+)V0G0N";
+
+    Core reader;
+    Blob::Ptr weights;
+    weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {8}, Layout::C));
+    weights->allocate();
+    weights->buffer().as<int64_t *>()[0] = 0;
+    weights->buffer().as<int64_t *>()[1] = 0;
+    EXPECT_NO_THROW(reader.ReadNetwork(model, weights));
+}
diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp
index 3cf0881cf74..25245522da6 100644
--- a/ngraph/core/src/op/loop.cpp
+++ b/ngraph/core/src/op/loop.cpp
@@ -111,6 +111,36 @@ void op::v5::Loop::validate_and_infer_types()
             m_num_iterations = 1; // condition_always_false, do_while mode
         }
     }
+    else if (const auto& cond_param = std::dynamic_pointer_cast<const op::Parameter>(
+                 body_execution_condition.get_node_shared_ptr()))
+    {
+        // Const(true or false) -> Loop (body: Parameter -> execution_condition output)
+        for (const auto& desc : get_input_descriptions())
+        {
+            if (m_body->get_parameters().at(desc->m_body_parameter_index) == cond_param)
+            {
+                if (const auto& cond_value =
+                        std::dynamic_pointer_cast<const op::Constant>(
+                            input_value(desc->m_input_index).get_node_shared_ptr()))
+                {
+                    auto val = cond_value->cast_vector<bool>();
+                    NODE_VALIDATION_CHECK(
+                        this,
+                        val.size() == 1,
+                        "The number of values in the Condition constant is greater than 1");
+
+                    if (val[0])
+                    {
+                        condition_always_true = true;
+                    }
+                    else
+                    {
+                        m_num_iterations = 1; // condition_always_false, do_while mode
+                    }
+                }
+            }
+        }
+    }

     const auto& trip_count = input_value(0);
     const auto& trip_count_rank = trip_count.get_partial_shape().rank();
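
Note (illustration, not part of the patch): the sketch below shows, using only the opset5 calls the patch itself makes (`set_function`, `set_special_body_ports`, `set_invariant_input`, `get_iter_value`), roughly what the updated `fillSubGraphLayer` builds for a Loop whose `port_map` marks an input with `purpose="current_iteration"` and an output with `purpose="execution_condition"`. All names, shapes and constant values are made up for the example.

```cpp
#include <memory>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset5.hpp>

// Hand-built equivalent of the wiring the parser derives from the port_map
// "purpose" attributes; shapes and values are illustrative only.
std::shared_ptr<ngraph::opset5::Loop> build_loop_like_parser() {
    using namespace ngraph;

    // Loop inputs: trip count, initial execution condition, one data tensor.
    auto trip_count = opset5::Constant::create(element::i64, Shape{}, {10});
    auto exec_cond = opset5::Constant::create(element::boolean, Shape{}, {true});
    auto data = std::make_shared<opset5::Parameter>(element::f32, Shape{1, 2});

    // Body: a current-iteration Parameter (external_port_id = -1 in the IR, so it
    // gets no input description), a data Parameter, a condition Result and a data Result.
    auto body_iter = std::make_shared<opset5::Parameter>(element::i64, Shape{});
    auto body_data = std::make_shared<opset5::Parameter>(element::f32, Shape{1, 2});
    auto body_cond = std::make_shared<opset5::Result>(
        opset5::Constant::create(element::boolean, Shape{}, {true}));
    auto body_out = std::make_shared<opset5::Result>(body_data);
    auto body = std::make_shared<Function>(ResultVector{body_cond, body_out},
                                           ParameterVector{body_iter, body_data});

    // Same calls the parser now issues: ctor with (trip_count, execution_condition),
    // special body ports for the "current_iteration" Parameter and the
    // "execution_condition" Result, then the regular port-map wiring.
    auto loop = std::make_shared<opset5::Loop>(trip_count, exec_cond);
    loop->set_function(body);
    loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{
        body->get_parameter_index(body_iter), body->get_result_index(body_cond)});
    loop->set_invariant_input(body_data, data);
    loop->get_iter_value(body_out, -1);
    loop->validate_and_infer_types();
    return loop;
}
```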
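
A second hedged sketch (again with illustrative names and shapes, not code taken from the patch) of the body pattern that the new branch in `Loop::validate_and_infer_types()` targets: the body execution-condition Result is a plain pass-through of a body Parameter, and that Parameter is fed by a boolean Constant at the Loop input, so the iteration count can still be resolved statically.

```cpp
#include <memory>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset5.hpp>

// The condition enters the body through a Parameter instead of a body-local Constant.
void condition_through_parameter_sketch() {
    using namespace ngraph;

    auto trip_count = opset5::Constant::create(element::i64, Shape{}, {5});
    auto exec_cond = opset5::Constant::create(element::boolean, Shape{}, {true});
    auto data = std::make_shared<opset5::Parameter>(element::f32, Shape{1});

    // Body: the condition Result just forwards a Parameter.
    auto cond_param = std::make_shared<opset5::Parameter>(element::boolean, Shape{});
    auto data_param = std::make_shared<opset5::Parameter>(element::f32, Shape{1});
    auto cond_result = std::make_shared<opset5::Result>(cond_param);
    auto data_result = std::make_shared<opset5::Result>(data_param);
    auto body = std::make_shared<Function>(ResultVector{cond_result, data_result},
                                           ParameterVector{cond_param, data_param});

    auto loop = std::make_shared<opset5::Loop>(trip_count, exec_cond);
    loop->set_function(body);
    // No current-iteration input (-1); the condition output is body result index 0.
    loop->set_special_body_ports(opset5::Loop::SpecialBodyPorts{-1, 0});
    // Constant(true) -> condition Parameter: the added else-if branch follows this
    // input description back to the Constant and treats the condition as always true.
    loop->set_invariant_input(cond_param, exec_cond);
    loop->set_invariant_input(data_param, data);
    loop->get_iter_value(data_result, -1);
    loop->validate_and_infer_types();
}
```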