Visitor api loop deserialization (#3894)

* Add on_adapter() implementation for special body parts.

* Remove NodeConverter and LayerCreator for Loop. Add WA for different number of inputs during Loop init by constructor and visit_attributes().

* Format files.

* Implement use case external_port_id=-1 for output port_map; change API for map_type_in_function.

* Replace GetStrAttr() with GetInt64Attr().

* Correct WA for input_offset when using visitor API. It shall search all input descriptions for duplicated indexes.

* Apply proper file format.

* Throw exception when input_offset < 0.

* Add more detailed description for input_offset WA.
This commit is contained in:
Szymon Durawa 2021-01-22 12:39:38 +01:00 committed by GitHub
parent d2ef8bf2f9
commit 49cd55a5cc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 175 additions and 478 deletions

View File

@ -387,6 +387,8 @@ void InferenceEngine::details::CNNLayerCreator::on_adapter(const std::string& na
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<std::vector<std::shared_ptr<
ngraph::op::util::SubGraphOp::OutputDescription>>>>(& adapter)) {
(void)a;
} else if (auto a = ::ngraph::as_type<::ngraph::AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>>(& adapter)) {
(void)a;
} else {
THROW_IE_EXCEPTION << "Error converting ngraph to CNN network. "
"Attribute adapter can not be found for " << name << " parameter";
@ -1557,6 +1559,12 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
return res;
});
addSpecificCreator({"Loop"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string>& params) -> CNNLayerPtr {
auto res = createSubGraphLayer(node);
res->type = "Loop";
return res;
});
addSpecificCreator({"SquaredDifference"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "Eltwise", details::convertPrecision(node->get_output_element_type(0))};
@ -1612,7 +1620,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
std::make_shared<Builder::NodeConverter<::ngraph::op::PSROIPooling>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ScaleShiftIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::VariadicSplit>>(),
std::make_shared<Builder::NodeConverter<::ngraph::opset5::Loop>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ShuffleChannels>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::v4::Interpolate>>(),
std::make_shared<Builder::NodeConverter<::ExecGraphInfoSerialization::ExecutionNode>>(),

View File

@ -108,222 +108,6 @@ CNNLayer::Ptr NodeConverter<ngraph::op::GenericIE>::createLayer(const std::share
return res;
}
// Converts an ngraph SubGraphOp (TensorIterator or Loop) into an IE TensorIterator layer:
// converts the body Function to a CNNNetwork, restores data names/precisions,
// and fills input/output port maps and back edges from the ngraph descriptions.
CNNLayer::Ptr createSubGraphLayer(const std::shared_ptr<ngraph::Node>& layer) {
auto tensor_iterator = std::dynamic_pointer_cast<ngraph::op::util::SubGraphOp>(layer);
if (!tensor_iterator) {
THROW_IE_EXCEPTION << "Cannot cast layer to TensorIterator.";
}
// inputs/outputs of TensorIterator (ngraph representation)
auto parameters = tensor_iterator->get_function()->get_parameters();
auto results = tensor_iterator->get_function()->get_results();
// Convert body (ngraph representation) to CNNNetwork.
// This network will contain nodes of type = "Input" and data nodes with wrong names.
// IE TensorIterator doesn't include such nodes so we create CNNNetwork in a separate scope
// to call the destructor and delete these "Input"/data nodes.
TensorIterator::Body body;
{
CNNNetwork body_net(tensor_iterator->get_function());
CNNNetwork net(InferenceEngine::details::convertFunctionToICNNNetwork(body_net.getFunction(), body_net));
// Paranoid check for cycles
bool res = CNNNetForestDFS(
CNNNetGetAllInputLayers(net), [](const CNNLayerPtr& layer) {}, false);
if (!res) {
THROW_IE_EXCEPTION << "Loop detected. TensorIterator body should not contain loops.";
}
// Get inputs/outputs of cnn network
auto in_info_map_with_parameters = net.getInputsInfo();
auto out_info_map = net.getOutputsInfo();
// The converted body must expose exactly one IE input per ngraph Parameter
// and one IE output per ngraph Result.
IE_ASSERT(in_info_map_with_parameters.size() == parameters.size());
IE_ASSERT(out_info_map.size() == results.size());
InferenceEngine::TensorIterator::Body temp_body;
temp_body.inputs.resize(in_info_map_with_parameters.size());
temp_body.outputs.resize(out_info_map.size());
// Fill inputs/outs in order aligned with ng representation
uint64_t counter = 0;
for (const auto& param : parameters) {
auto info = in_info_map_with_parameters.at(param->get_friendly_name());
temp_body.inputs[counter++] = info->getInputData();
}
// Maps an ngraph Result to the IE output name of its producer:
// "[producer_name]" or "[producer_name].[port_idx]" when the producer has several outputs.
auto map_ng_result_to_ie_name = [] (std::shared_ptr<ngraph::op::v0::Result> res_op) {
auto result = res_op->input(0).get_source_output();
std::string name = result.get_node()->get_friendly_name();
if (result.get_node()->get_output_size() > 1) {
name += "." + std::to_string(result.get_index());
}
return name;
};
counter = 0;
for (const auto& result : results) {
auto data = out_info_map.at(map_ng_result_to_ie_name(result));
temp_body.outputs[counter++] = data;
}
// This deep copy will hold all unreachable constants. See the comment in CopyTIBody function.
body = InferenceEngine::NetPass::CopyTIBody(temp_body);
// Check if data is really const layer holder
auto is_constant_holder = [] (const DataPtr data) {
return data->getPrecision() == Precision::UNSPECIFIED;
};
// Strip unreached node holder from Inputs node.
auto holder = body.inputs.back();
if (is_constant_holder(holder)) {
auto& holder_map = getInputTo(holder);
// remove_if
for (auto it = holder_map.begin(); it != holder_map.end(); ) {
if (it->second->type == "Input")
it = holder_map.erase(it);
else
++it;
}
}
// TODO: Disable this WA after total switch onto Ngraph
// WA: Some plugins (like GPU) require matching of Data object name and producer Layer name.
// Data name is expected in format "[layer_name]" or "[layer_name].[port_idx]" in case
// of multiple inputs. We have to restore it if possible and ignore original names of
// Ngraph parameter and result ops.
// Will not change data name if:
// - data has several consumer layers
// - data has no consumer (example if data is straight used as output)
//
for (auto &in : body.inputs) {
if (is_constant_holder(in))
continue;
const auto input_to = getInputTo(in);
if (input_to.size() != 1)
continue;
const auto consumer_layer = input_to.begin()->second;
const auto consumer_in_port_set = consumer_layer->insData;
const auto found = std::find_if(consumer_in_port_set.begin(), consumer_in_port_set.end(),
[&in] (const DataWeakPtr &wptr) { return wptr.lock() == in; });
IE_ASSERT(found != consumer_in_port_set.end());
const auto consumer_port_idx = std::distance(consumer_in_port_set.begin(), found);
auto new_name = consumer_layer->name;
if (consumer_in_port_set.size() > 1) {
new_name += '.' + std::to_string(consumer_port_idx);
}
in->setName(new_name);
}
// TODO: this WA restore original precisions of outputs.
// convertFunctionToICNNNetwork has internal fallback policy for unsupported
// precisions for inputs/outputs ports. Particular for U8 will be translated
// to FP32. However Loop body has strong requirements for continue_condition
// port, it should be BOOL(U8).
//
for (size_t i = 0; i < results.size(); i++) {
auto result = results[i];
auto output = body.outputs[i];
if (result->get_element_type() == ngraph::element::u8) {
output->setPrecision(InferenceEngine::Precision::U8);
}
}
}
// Create Inference Engine representation of TensorIterator
LayerParams params = {layer->get_friendly_name(), "TensorIterator",
details::convertPrecision(layer->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::TensorIterator>(params);
res->body = body;
// Port map: outputs
// PortMap fields are {from, to, axis, stride, start, end, part_size};
// non-concatenated/non-sliced entries use {-1, 1, 0, -1, 1} for the last five.
for (const auto& desc : tensor_iterator->get_output_descriptions()) {
auto body_output_idx = desc->m_body_value_index;
std::string type_name = desc->get_type_info().name;
if (type_name == "ConcatOutputDescription") {
auto output_desc = ::ngraph::as_type_ptr<ngraph::op::TensorIterator::ConcatOutputDescription>(desc);
IE_ASSERT(output_desc != nullptr);
res->output_port_map.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(output_desc->m_output_index), static_cast<int>(body_output_idx),
static_cast<int>(output_desc->m_axis), static_cast<int>(output_desc->m_stride),
static_cast<int>(output_desc->m_start), static_cast<int>(output_desc->m_end),
static_cast<int>(output_desc->m_part_size)});
} else if (type_name == "BodyOutputDescription") {
auto output_desc = ::ngraph::as_type_ptr<ngraph::op::TensorIterator::BodyOutputDescription>(desc);
IE_ASSERT(output_desc != nullptr);
res->output_port_map.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(output_desc->m_output_index), static_cast<int>(body_output_idx), -1, 1, 0, -1, 1});
} else {
THROW_IE_EXCEPTION << "Incorrect type of the output description.";
}
}
// Port map : inputs and back edges
for (const auto& desc : tensor_iterator->get_input_descriptions()) {
auto body_input_index = desc->m_body_parameter_index;
if (const auto slice_desc = std::dynamic_pointer_cast<ngraph::op::TensorIterator::SliceInputDescription>(desc)) {
res->input_port_map.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(slice_desc->m_input_index), static_cast<int>(body_input_index),
static_cast<int>(slice_desc->m_axis), static_cast<int>(slice_desc->m_stride),
static_cast<int>(slice_desc->m_start), static_cast<int>(slice_desc->m_end),
static_cast<int>(slice_desc->m_part_size)});
} else if (const auto merge_desc = std::dynamic_pointer_cast<ngraph::op::TensorIterator::MergedInputDescription>(desc)) {
res->input_port_map.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(merge_desc->m_input_index), static_cast<int>(body_input_index), -1, 1, 0, -1, 1});
auto body_output_idx = merge_desc->m_body_value_index;
// Merged inputs also produce a back edge: body output feeds the body input
// on the next iteration.
res->back_edges.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(body_output_idx), static_cast<int>(body_input_index), -1, 1, 0, -1, 1});
} else if (const auto inv_desc = std::dynamic_pointer_cast<ngraph::op::TensorIterator::InvariantInputDescription>(desc)) {
res->input_port_map.emplace_back(InferenceEngine::TensorIterator::PortMap {
static_cast<int>(inv_desc->m_input_index), static_cast<int>(body_input_index), -1, 1, 0, -1, 1});
} else {
THROW_IE_EXCEPTION << "Incorrect type of the input description.";
}
}
// For Loop, additionally record the special body ports and the fixed positions
// of the trip-count (input 0) and execution-condition (input 1) inputs as layer params.
if (const auto loop_op = std::dynamic_pointer_cast<const ngraph::opset5::Loop>(layer)) {
auto spec_port = loop_op->get_special_body_ports();
if (spec_port.current_iteration_input_idx != -1) {
auto ie_port_idx = spec_port.current_iteration_input_idx;
res->params["loop_body_current_iteration_idx"] = std::to_string(ie_port_idx);
}
if (spec_port.body_condition_output_idx != -1) {
auto body_output_idx = spec_port.body_condition_output_idx;
res->params["loop_body_condition_output_idx"] = std::to_string(body_output_idx);
}
res->params["loop_trip_count_idx"] = "0";
res->params["loop_execution_condition_idx"] = "1";
}
return res;
}
// Converts an ngraph TensorIterator node into an IE sub-graph layer and
// tags the resulting layer with the "TensorIterator" type.
template<>
CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    CNNLayer::Ptr converted = createSubGraphLayer(layer);
    converted->type = "TensorIterator";
    return converted;
}
// Converts an ngraph opset5 Loop node into an IE sub-graph layer and
// tags the resulting layer with the "Loop" type.
template<>
CNNLayer::Ptr NodeConverter<ngraph::opset5::Loop>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
    CNNLayer::Ptr converted = createSubGraphLayer(layer);
    converted->type = "Loop";
    return converted;
}
template <>
CNNLayer::Ptr NodeConverter<ngraph::op::Ceiling>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
LayerParams params = {layer->get_friendly_name(), "Ceiling",

View File

@ -46,8 +46,9 @@ IRParser::IRParser(size_t version, const std::vector<InferenceEngine::IExtension
}
}
void V10Parser::XmlDeserializer::map_type_in_function(const pugi::xml_node& node,
const std::string map_type, std::map<uint64_t, uint64_t>& type_id_in_function) {
std::map<uint64_t, uint64_t> V10Parser::XmlDeserializer::map_type_in_function(const pugi::xml_node& node,
const std::string map_type) {
std::map<uint64_t, uint64_t> type_id_in_function;
uint64_t map_type_number = 0;
auto body_node = node.child("body");
@ -56,45 +57,43 @@ void V10Parser::XmlDeserializer::map_type_in_function(const pugi::xml_node& node
}
// Fill map: parameter/result id to parameter/result number in Function
FOREACH_CHILD(_layer, body_node.child("layers"), "layer") {
auto type = XMLParseUtils::GetStrAttr(_layer, "type");
FOREACH_CHILD(layer, body_node.child("layers"), "layer") {
auto type = XMLParseUtils::GetStrAttr(layer, "type");
if (type == map_type) {
auto id = XMLParseUtils::GetUIntAttr(_layer, "id");
type_id_in_function[id] = map_type_number;
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
type_id_in_function.emplace(id, map_type_number);
map_type_number++;
}
}
return type_id_in_function;
}
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10Parser::XmlDeserializer::parseInputDescription(const pugi::xml_node& node) {
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> inputs;
std::map<uint64_t, uint64_t> param_id_in_function;
std::map<uint64_t, uint64_t> result_id_in_function;
map_type_in_function(node, "Parameter", param_id_in_function);
map_type_in_function(node, "Result", result_id_in_function);
std::map<uint64_t, uint64_t> param_id_in_function = map_type_in_function(node, "Parameter");
std::map<uint64_t, uint64_t> result_id_in_function = map_type_in_function(node, "Result");
// Parse PortMap: external_port_id for inputs does not always appear in consecutive order
std::map<uint64_t, pugi::xml_node> input_map;
FOREACH_CHILD(_input, node.child("port_map"), "input") {
int64_t ext_port_id = GetInt64Attr(_input, "external_port_id");
input_map[ext_port_id] = _input;
FOREACH_CHILD(input, node.child("port_map"), "input") {
int64_t ext_port_id = GetInt64Attr(input, "external_port_id");
input_map.emplace(ext_port_id, input);
}
for (const auto& input : input_map) {
auto &_input = input.second;
auto axis_attr = _input.attribute("axis");
auto purpose = XMLParseUtils::GetStrAttr(_input, "purpose", "");
int64_t ti_input_index = XMLParseUtils::GetInt64Attr(_input, "external_port_id");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(_input, "internal_layer_id");
auto &xml_input = input.second;
auto axis_attr = xml_input.attribute("axis");
int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
// if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
if (!axis_attr.empty()) {
size_t axis = XMLParseUtils::GetUIntAttr(_input, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(_input, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(_input, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(_input, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(_input, "part_size", 1);
size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1);
inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::SliceInputDescription>
(ti_input_index,
@ -107,11 +106,11 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10
} else {
// otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
bool is_back_edge_exist = false;
FOREACH_CHILD(_edge, node.child("back_edges"), "edge") {
size_t to_layer = XMLParseUtils::GetUIntAttr(_edge, "to-layer");
FOREACH_CHILD(xml_edge, node.child("back_edges"), "edge") {
size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer");
if (to_layer == body_parameter_index) {
size_t from_layer = XMLParseUtils::GetUIntAttr(_edge, "from-layer");
size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer");
inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::MergedInputDescription>
(ti_input_index,
param_id_in_function[body_parameter_index],
@ -136,51 +135,93 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> V10Parser::XmlDeserializer::parseOutputDescription(const pugi::xml_node& node) {
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> outputs;
std::map<uint64_t, uint64_t> result_id_in_function;
map_type_in_function(node, "Result", result_id_in_function);
std::map<uint64_t, uint64_t> result_id_in_function = map_type_in_function(node, "Result");
// Parse PortMap: outputs
std::map<int64_t, pugi::xml_node> output_map;
FOREACH_CHILD(_output, node.child("port_map"), "output") {
int64_t ext_port_id = GetInt64Attr(_output, "external_port_id");
output_map[ext_port_id] = _output;
FOREACH_CHILD(output, node.child("port_map"), "output") {
int64_t ext_port_id = GetInt64Attr(output, "external_port_id");
output_map.emplace(ext_port_id, output);
}
uint64_t output_number = 0;
for (const auto& output : output_map) {
auto& _output = output.second;
auto axis_attr = _output.attribute("axis");
auto purpose = XMLParseUtils::GetStrAttr(_output, "purpose", "");
size_t body_result_index = XMLParseUtils::GetUIntAttr(_output, "internal_layer_id");
auto& xml_output = output.second;
auto axis_attr = xml_output.attribute("axis");
size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
// if axis is set, then concatenation is enabled. Create ngraph::TensorIterator::ConcatOutput.
if (!axis_attr.empty()) {
int64_t axis = XMLParseUtils::GetInt64Attr(_output, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(_output, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(_output, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(_output, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(_output, "part_size", 1);
// if external_port_id < 0 it means that this body result isn't connected to the Loop output
// and is used only for internal needs. For TensorIterator external_port_id is always > 0.
if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) {
// if axis is set, then concatenation is enabled. Create ngraph::TensorIterator::ConcatOutput.
if (!axis_attr.empty()) {
int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1);
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::ConcatOutputDescription>
(result_id_in_function[body_result_index],
output_number,
start,
stride,
part_size,
end,
axis));
} else {
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::BodyOutputDescription>
(result_id_in_function[body_result_index],
output_number,
-1));
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::ConcatOutputDescription>
(result_id_in_function[body_result_index],
output_number,
start,
stride,
part_size,
end,
axis));
} else {
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::BodyOutputDescription>
(result_id_in_function[body_result_index],
output_number,
-1));
}
output_number++;
}
output_number++;
}
return outputs;
}
// Parses the "purpose" attribute of the Loop op's port_map entries and returns
// the special body ports: the index of the "current_iteration" Parameter and of
// the "execution_condition" Result inside the body Function. -1 means absent.
ngraph::op::v5::Loop::SpecialBodyPorts V10Parser::XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) {
ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1};
// Map body-layer XML ids to consecutive Parameter/Result numbers in the Function.
std::map<uint64_t, uint64_t> params = map_type_in_function(node, "Parameter");
std::map<uint64_t, uint64_t> results = map_type_in_function(node, "Result");
NGRAPH_CHECK(!params.empty() || !results.empty(), "No parameters or results found in body Function.");
// Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive order
// NOTE(review): the key type is uint64_t while ext_port_id is int64_t, so a negative
// external_port_id would wrap to a huge key — presumably inputs never use -1; confirm.
std::map<uint64_t, pugi::xml_node> input_map;
FOREACH_CHILD(input, node.child("port_map"), "input") {
int64_t ext_port_id = GetInt64Attr(input, "external_port_id");
input_map.emplace(ext_port_id, input);
}
std::map<int64_t, pugi::xml_node> output_map;
FOREACH_CHILD(output, node.child("port_map"), "output") {
int64_t ext_port_id = GetInt64Attr(output, "external_port_id");
output_map.emplace(ext_port_id, output);
}
// Inputs: the entry marked purpose="current_iteration" names the body Parameter
// that receives the iteration counter.
for (const auto& input : input_map) {
auto &xml_input = input.second;
auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", "");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
if (purpose == "current_iteration") {
result.current_iteration_input_idx = params[body_parameter_index];
}
}
// Outputs: the entry marked purpose="execution_condition" names the body Result
// that produces the continue-condition.
for (const auto& output : output_map) {
auto &xml_output = output.second;
auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", "");
// NOTE(review): despite its name, this id refers to a body Result layer here.
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
if (purpose == "execution_condition") {
result.body_condition_output_idx = results[body_parameter_index];
}
}
return result;
}
void V10Parser::XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) {
std::string val;
@ -192,6 +233,8 @@ void V10Parser::XmlDeserializer::on_adapter(const std::string& name, ngraph::Val
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<std::shared_ptr
<ngraph::op::util::SubGraphOp::OutputDescription>>>>(&adapter)) {
a->set(parseOutputDescription(node));
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>>(&adapter)) {
a->set(parsePurposeAttribute(node));
}
}
@ -628,7 +671,6 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
{ "ReorgYolo", std::make_shared<LayerCreator<ngraph::op::ReorgYolo>>("ReorgYolo") },
{ "PSROIPooling", std::make_shared<LayerCreator<ngraph::op::PSROIPooling>>("PSROIPooling") },
{ "VariadicSplit", std::make_shared<LayerCreator<ngraph::op::VariadicSplit>>("VariadicSplit") },
{ "Loop", std::make_shared<LayerCreator<ngraph::opset5::Loop>>("Loop") },
{ "LogicalAnd", std::make_shared<LayerCreator<ngraph::op::v1::LogicalAnd>>("LogicalAnd") },
{ "LogicalOr", std::make_shared<LayerCreator<ngraph::op::v1::LogicalOr>>("LogicalOr") },
{ "LogicalXor", std::make_shared<LayerCreator<ngraph::op::v1::LogicalXor>>("LogicalXor") },
@ -796,198 +838,6 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
namespace InferenceEngine {
// SubGraph layer
// Fills a sub-graph op (TensorIterator or Loop) from its XML representation:
// parses the body Function, then wires sliced/merged/invariant inputs and
// concatenated/last-iteration outputs according to the port_map and back_edges
// sections. Special "purpose" entries are only valid for Loop.
std::shared_ptr<ngraph::Node>
V10Parser::LayerBaseCreator::fillSubGraphLayer(const ngraph::OutputVector &inputs, const pugi::xml_node &node,
const Blob::CPtr& weights,
const V10Parser::GenericLayerParams &layerParsePrms,
std::shared_ptr<ngraph::op::util::SubGraphOp> subgraph_op) {
subgraph_op->set_friendly_name(GetStrAttr(node, "name"));
auto body_node = node.child("body");
if (body_node.empty()) {
THROW_IE_EXCEPTION << "TensorIterator has no body.";
}
// Fill map: result/parameter id to name
std::map<uint64_t, std::string> layer_idx_to_name;
FOREACH_CHILD(_layer, body_node.child("layers"), "layer") {
auto type = GetStrAttr(_layer, "type");
if (type == "Result" || type == "Parameter") {
auto id = GetUIntAttr(_layer, "id");
auto name = GetStrAttr(_layer, "name");
layer_idx_to_name[id] = name;
}
}
// Create ngraph::Function and set it as body of TensorIterator layer
IRParser parser(10);
auto ngraph_function = parser.parse(node.child("body"), weights)->getFunction();
auto parameter_nodes = ngraph_function->get_parameters();
auto result_nodes = ngraph_function->get_results();
// Disabled reshape for generic operations in the TI body
::ngraph::op::GenericIE::DisableReshape noReshape(ngraph_function);
auto body = std::make_shared<ngraph::Function>(result_nodes, parameter_nodes);
subgraph_op->set_function(body);
// Parse PortMap: inputs
// Keyed by external_port_id because entries are not guaranteed to be in order.
std::map<uint64_t, pugi::xml_node> input_map;
FOREACH_CHILD(_input, node.child("port_map"), "input") {
int64_t ext_port_id = GetInt64Attr(_input, "external_port_id");
input_map[ext_port_id] = _input;
}
bool is_sliced_input_exists = false;
for (const auto& input : input_map) {
auto &_input = input.second;
auto axis_attr = _input.attribute("axis");
auto purpose = GetStrAttr(_input, "purpose", "");
int64_t ti_input_index = GetInt64Attr(_input, "external_port_id");
size_t body_parameter_index = GetUIntAttr(_input, "internal_layer_id");
// Resolve the body Parameter by the friendly name recorded for this layer id.
auto body_param = std::find_if(parameter_nodes.begin(), parameter_nodes.end(),
[&](const std::shared_ptr<ngraph::op::Parameter>& param) {
return param->get_friendly_name() == layer_idx_to_name[body_parameter_index];
});
if (body_param == parameter_nodes.end()) {
THROW_IE_EXCEPTION << "PortMap input parsing error. Body parameter with id = " << body_parameter_index
<< " not found.";
}
if (ti_input_index >= static_cast<int64_t>(inputs.size()))
THROW_IE_EXCEPTION << "TensorIterator " << layerParsePrms.name << " has incorrect number of inputs!";
// if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
if (!axis_attr.empty()) {
size_t axis = GetUIntAttr(_input, "axis");
int64_t start = GetInt64Attr(_input, "start", 0);
int64_t stride = GetInt64Attr(_input, "stride", 1);
int64_t end = GetInt64Attr(_input, "end", -1);
int64_t part_size = GetInt64Attr(_input, "part_size", 1);
subgraph_op->set_sliced_input(*body_param, inputs.at(ti_input_index), start, stride, part_size, end, axis);
is_sliced_input_exists = true;
} else {
// otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
bool is_back_edge_exist = false;
FOREACH_CHILD(_edge, node.child("back_edges"), "edge") {
size_t to_layer = GetUIntAttr(_edge, "to-layer");
if (to_layer == body_parameter_index) {
size_t from_layer = GetUIntAttr(_edge, "from-layer");
auto body_result = std::find_if(
result_nodes.begin(), result_nodes.end(), [&](std::shared_ptr<ngraph::op::Result>& result) {
return result->get_friendly_name() == layer_idx_to_name[from_layer];
});
if (body_result == result_nodes.end()) {
THROW_IE_EXCEPTION << "PortMap input parsing error. Body result with id = " << from_layer
<< " not found.";
}
subgraph_op->set_merged_input(*body_param, inputs.at(ti_input_index), *body_result);
is_back_edge_exist = true;
break;
}
}
// ti_input_index = -1 means that Parameter of the body is not connected to inputs of TensorIterator
// and is used only for internal needs.
if (!is_back_edge_exist && ti_input_index >= 0) {
subgraph_op->set_invariant_input(*body_param, inputs.at(ti_input_index));
}
// purpose="current_iteration" marks the Parameter receiving the loop counter;
// only the Loop op supports special body ports.
if (purpose == "current_iteration") {
auto loop = std::dynamic_pointer_cast<ngraph::opset5::Loop>(subgraph_op);
if (!loop)
THROW_IE_EXCEPTION << "PortMap output parsing error. Purpose attribute is available only for Loop operation.";
loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{ngraph_function->get_parameter_index(*body_param),
-1});
}
}
}
// Parse PortMap: outputs
std::map<int64_t, pugi::xml_node> output_map;
FOREACH_CHILD(_output, node.child("port_map"), "output") {
int64_t ext_port_id = GetInt64Attr(_output, "external_port_id");
output_map[ext_port_id] = _output;
}
for (const auto& output : output_map) {
auto& _output = output.second;
auto axis_attr = _output.attribute("axis");
auto purpose = GetStrAttr(_output, "purpose", "");
size_t body_result_index = GetUIntAttr(_output, "internal_layer_id");
auto body_result =
std::find_if(result_nodes.begin(), result_nodes.end(), [&](std::shared_ptr<ngraph::op::Result>& result) {
return result->get_friendly_name() == layer_idx_to_name[body_result_index];
});
if (body_result == result_nodes.end()) {
THROW_IE_EXCEPTION << "PortMap output parsing error. Body result with id = " << body_result_index
<< " not found.";
}
// if axis is set, then concatenation is enabled. Create ngraph::TensorIterator::ConcatOutput.
if (!axis_attr.empty()) {
int64_t axis = GetInt64Attr(_output, "axis");
int64_t start = GetInt64Attr(_output, "start", 0);
int64_t stride = GetInt64Attr(_output, "stride", 1);
int64_t end = GetInt64Attr(_output, "end", -1);
int64_t part_size = GetInt64Attr(_output, "part_size", 1);
subgraph_op->get_concatenated_slices(*body_result, start, stride, part_size, end, axis);
// With no sliced input, a TensorIterator derives its iteration count from
// the output slicing parameters instead.
if (!is_sliced_input_exists) {
if (auto ti = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(subgraph_op))
// for Loop op we just skip this call
if (ti)
ti->set_num_iterations((std::abs(end - start)) / part_size);
}
} else if (purpose == "execution_condition") {
auto loop = std::dynamic_pointer_cast<ngraph::opset5::Loop>(subgraph_op);
if (!loop)
THROW_IE_EXCEPTION << "PortMap output parsing error. Purpose attribute is available only for Loop operation.";
loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{loop->get_special_body_ports().current_iteration_input_idx,
ngraph_function->get_result_index(*body_result)});
// if external_port_id < 0,
// it means that this body result isn't connected to the Loop output and is used only for internal needs.
if (output.first >= 0) {
subgraph_op->get_iter_value(*body_result, -1);
}
} else {
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
subgraph_op->get_iter_value(*body_result, -1);
}
}
subgraph_op->validate_and_infer_types();
return subgraph_op;
}
// TensorIterator layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::TensorIterator>::createLayer(
    const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
    const GenericLayerParams& layerParsePrms) {
    // Build an empty TensorIterator op; the body and port maps are parsed
    // by the shared sub-graph filler.
    const auto tensor_iterator_op = std::make_shared<ngraph::op::TensorIterator>();
    return fillSubGraphLayer(inputs, node, weights, layerParsePrms, tensor_iterator_op);
}
// Loop layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::opset5::Loop>::createLayer(
    const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
    const GenericLayerParams& layerParsePrms) {
    // Loop's first two inputs are the trip count (0) and the initial execution
    // condition (1); the body and port maps come from the shared sub-graph filler.
    const auto loop_op = std::make_shared<ngraph::opset5::Loop>(inputs[0], inputs[1]);
    return fillSubGraphLayer(inputs, node, weights, layerParsePrms, loop_op);
}
// LSTMCell layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::v0::LSTMCell>::createLayer(

View File

@ -9,7 +9,7 @@
# include <ngraph/op/util/sub_graph_base.hpp>
# include <ngraph/opsets/opset.hpp>
# include <ie_ngraph_utils.hpp>
# include <ngraph/opsets/opset.hpp>
# include <ngraph/opsets/opset5.hpp>
#endif // IR_READER_V10
#include <ie_blob.h>
@ -301,13 +301,17 @@ private:
/// op iterations. Map contains the type id and the consecutive number assigned to it, starting from 0.
/// \param node xml op representation
/// \param type op type name to find
/// \param type_id_in_function map container
void map_type_in_function(const pugi::xml_node& node, std::string type, std::map<uint64_t, uint64_t>& type_id_in_function);
/// \return map container
std::map<uint64_t, uint64_t> map_type_in_function(const pugi::xml_node& node, std::string type);
/// \brief Traverses xml node representation in order to create nGraph function for it.
/// \param node xml node representation
/// \param weights weights attached to current node
/// \return shared pointer to function representing input node
std::shared_ptr<ngraph::Function> parse_function(const pugi::xml_node& root, const Blob::CPtr& weights);
/// \brief Traverses xml node representation in order to get the purpose attribute of inputs/outputs in the body of Loop op.
/// \param node xml node representation
/// \return struct with value of purpose attribute
ngraph::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node);
GenericLayerParams parseGenericParams(const pugi::xml_node& node);
std::shared_ptr<ngraph::Node> createNode(const ngraph::OutputVector& inputs, const pugi::xml_node& node,

View File

@ -92,4 +92,19 @@ namespace ngraph
};
}
}
/// \brief AttributeVisitor adapter for op::v5::Loop::SpecialBodyPorts.
///
/// Lets a visitor read/write a Loop's special body ports struct as a single
/// attribute (see the matching on_attribute("special_body_ports", ...) call
/// in Loop::visit_attributes). DirectValueAccessor holds a reference to the
/// wrapped value, so the adapter must not outlive it.
template <>
class NGRAPH_API AttributeAdapter<op::v5::Loop::SpecialBodyPorts>
    : public DirectValueAccessor<op::v5::Loop::SpecialBodyPorts>
{
public:
    AttributeAdapter(op::v5::Loop::SpecialBodyPorts& value)
        : DirectValueAccessor<op::v5::Loop::SpecialBodyPorts>(value)
    {
    }

    // Unique runtime type identifier for this adapter specialization; the
    // corresponding out-of-class definition lives in loop.cpp.
    static constexpr DiscreteTypeInfo type_info{
        "AttributeAdapter<op::v5::Loop::SpecialBodyPorts>", 0};
    const DiscreteTypeInfo& get_type_info() const override { return type_info; }
};
}

View File

@ -41,8 +41,9 @@ bool op::v5::Loop::visit_attributes(AttributeVisitor& visitor)
visitor.on_attribute("body", m_body);
visitor.on_attribute("input_descriptions", m_input_descriptions);
visitor.on_attribute("output_descriptions", m_output_descriptions);
visitor.on_attribute("special_body_ports", m_special_body_ports);
return false;
return true;
}
void op::v5::Loop::validate_and_infer_types()
@ -167,16 +168,28 @@ void op::v5::Loop::validate_and_infer_types()
m_num_iterations = val[0];
}
// WA: an input description with index 0 or 1 means that the Loop constructor will duplicate it
// in the inputs.
// When using visit_attributes() no duplication occurs, so input_offset shall be decremented.
size_t input_offset = 2;
for (const auto& in_desc : m_input_descriptions)
{
if (in_desc->m_input_index == 0 || in_desc->m_input_index == 1)
{
input_offset--;
}
}
// input_offset < 0 means that there are several duplications of external_port_id
// (the same ext_port_id is connected to several Parameters in the port map) in input_desc,
// this can lead to wrong or undefined behavior, so throw exception here. Ticket: 47302
// NOTE(review): input_offset is size_t (unsigned), so instead of going negative it wraps
// around to a huge value and the ">= 0" check below is vacuously true — use a signed type
// or compare the duplicate count directly. TODO confirm and fix.
NODE_VALIDATION_CHECK(this, input_offset >= 0, "External port id 0 or 1 is duplicated.");
NODE_VALIDATION_CHECK(this,
get_input_size() == m_input_descriptions.size() + 2,
get_input_size() == m_input_descriptions.size() + input_offset,
"Number of inputs must be the same as number of input descriptions");
NODE_VALIDATION_CHECK(this,
get_output_size() == m_output_descriptions.size(),
"Number of outputs must be the same as number of output descriptions");
// Input
uint64_t index_it = 2;
uint64_t index_it = input_offset;
for (const auto& input_description : m_input_descriptions)
{
auto index = input_description->m_input_index;
@ -297,14 +310,33 @@ void op::v5::Loop::validate_and_infer_types()
}
}
}
NODE_VALIDATION_CHECK(this,
get_output_size() == m_output_descriptions.size(),
"Number of outputs must be the same as number of output descriptions");
}
std::shared_ptr<Node> op::v5::Loop::clone_with_new_inputs(const OutputVector& new_args) const
{
NGRAPH_OP_SCOPE(v5_Loop_clone_with_new_inputs);
// WA: an input description with index 0 or 1 means that the Loop constructor will duplicate it
// in the inputs.
// When using visit_attributes() no duplication occurs, so input_offset shall be decremented.
size_t input_offset = 2;
for (const auto& in_desc : m_input_descriptions)
{
if (in_desc->m_input_index == 0 || in_desc->m_input_index == 1)
{
input_offset--;
}
}
// input_offset < 0 means that there are several duplications of external_port_id
// (the same ext_port_id is connected to several Parameters in the port map) in input_desc,
// this can lead to wrong or undefined behavior, so throw exception here. Ticket: 47302
// NOTE(review): input_offset is size_t (unsigned), so instead of going negative it wraps
// around to a huge value and the ">= 0" check below is vacuously true — use a signed type
// or compare the duplicate count directly. TODO confirm and fix.
NODE_VALIDATION_CHECK(this, input_offset >= 0, "External port id 0 or 1 is duplicated.");
// 0 - trip_count, 1 - execution condition, these inputs are not connected to the body
// params
OutputVector body_params_args(new_args.begin() + 2, new_args.end());
OutputVector body_params_args(new_args.begin() + input_offset, new_args.end());
auto op = make_shared<op::v5::Loop>(new_args[0], new_args[1]);
for (int idx = 2; idx < new_args.size(); ++idx)
{
@ -399,3 +431,8 @@ bool op::v5::Loop::evaluate(const HostTensorVector& outputs, const HostTensorVec
m_body, m_output_descriptions, m_input_descriptions, m_special_body_ports, outputs, inputs);
return true;
}
namespace ngraph
{
    // Out-of-class definition of the adapter's static constexpr type_info.
    // Needed (pre-C++17) so the constant can be ODR-used, e.g. bound to the
    // const DiscreteTypeInfo& returned by get_type_info().
    constexpr DiscreteTypeInfo AttributeAdapter<op::v5::Loop::SpecialBodyPorts>::type_info;
}