Remove generic ie op (#4213)
* Removed legacy IE shape infer
* Removed GenericIE operation (the plugin-side workaround that goes away with it is sketched below)
* Removed legacy shape infer tests
* Removed legacy test with legacy IE reshape
* Fixed compilation issues related to removal of GenericIE
* Fixed one more compilation issue with clDNN
* Fixed test for reading experimental ops
* Updated tests and made the IR Reader load old experimental and extension ops as opset6
* Changed the opset of ops only if they are currently experimental/extension, to avoid situations like opset1::Proposal -> opset6::Proposal
* Removed more legacy code
* Returned code that was removed by mistake
* Fixed issues related to an incorrect merge with master
* Merge fixes
* Fixed unit tests that started to fail because loading a model with an unknown operation now fails earlier
* Removed incorrectly added code

Co-authored-by: Evgeny Lazarev <elazarev.nnov@gmail.com>
parent 0442184d0d
commit 3f5ff2cfe5
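Most of the diff below is mechanical: each plugin wrapped its nGraph transformation pipeline in the GenericIE::DisableReshape RAII guard (and pulled in generic_ie.hpp) so that shape inference was skipped for GenericIE nodes while passes ran, and those guards and includes are what this commit deletes. A condensed sketch of the removed pattern is shown here for orientation; the wrapper function name and the single registered pass are illustrative, not the full pipeline of any particular plugin.

```cpp
// Condensed sketch of the legacy workaround this commit removes.
// The wrapper function and pass list are illustrative only.
#include <generic_ie.hpp>                      // legacy header deleted by this PR
#include <ngraph/pass/manager.hpp>
#include <transformations/init_node_info.hpp>

static void TransformNetwork(const std::shared_ptr<ngraph::Function>& nGraphFunc) {
    // Disable shape inference (WA for generic operations): the guard flips
    // doReshape(false) on every GenericIE node it finds in the function.
    ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::InitNodeInfo>();
    manager.run_passes(nGraphFunc);
}   // noReshape is destroyed here and re-enables reshape on those nodes
```
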
@@ -844,8 +844,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*

EXCLUDE_PATTERNS = generic_ie.hpp \
function_name.hpp \
EXCLUDE_PATTERNS = function_name.hpp \
macro_overload.hpp

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names

@@ -38,8 +38,6 @@ FILE_PATTERNS = *.c \
*.hpp \
*.md

EXCLUDE_PATTERNS = generic_ie.hpp

EXCLUDE_SYMBOLS = InferenceEngine::details

TAGFILES = "@DOCS_BUILD_DIR@/ie_api.tag=.." \

@@ -21,7 +21,6 @@
#include <ngraph/opsets/opset5.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <generic_ie.hpp>
#include <ie_ngraph_utils.hpp>

#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>

@@ -137,8 +136,6 @@ InferenceEngine::CNNNetwork clDNNEngine::CloneAndTransformNetwork(const Inferenc
if (clonedNetwork.getFunction()) {
OV_ITT_SCOPED_TASK(itt::domains::CLDNNPlugin, "clDNNEngine::TransformNetwork");
auto nGraphFunc = clonedNetwork.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

bool enableInt8;
{

@@ -25,8 +25,8 @@
#include <sys/stat.h>
#include <exec_graph_info.hpp>
#include <ie_ngraph_utils.hpp>
#include "generic_ie.hpp"
#include <ngraph/variant.hpp>
#include <ngraph/ngraph.hpp>
#include "cldnn_itt.h"

using namespace InferenceEngine;

@@ -464,7 +464,6 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve
create_ngraph_node(pi);
}

ngraph::op::GenericIE::DisableReshape reshape(nodes);
auto function = std::make_shared<ngraph::Function>(results, params, "runtime_gpu_graph");
InferenceEngine::CNNNetwork net(function);
return net;

@@ -40,7 +40,6 @@
#include <layers/gna_fake_quantize_layer.hpp>
#include "gna_graph_patterns.hpp"

#include <generic_ie.hpp>
#include <ngraph/pass/manager.hpp>
#include <legacy/convert_function_to_cnn_network.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>

@@ -682,8 +681,6 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
if (_network.getFunction()) {
CNNNetwork clonedNetwork = InferenceEngine::cloneNetwork(_network);
const auto& graph = clonedNetwork.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(graph);
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();
// WA: ConvertPriorBox must be executed before the 1st ConstantFolding pass

@@ -32,7 +32,6 @@
#include "ie_ngraph_utils.hpp"
#include "exec_graph_info.hpp"
#include "ie_itt.hpp"
#include "generic_ie.hpp"

using namespace std;
using namespace InferenceEngine;

@@ -44,7 +43,6 @@ static std::shared_ptr<ngraph::Function> copyFunction(const std::shared_ptr<cons
bool constFolding) {
OV_ITT_SCOPED_TASK(itt::domains::IE, "copyFunction");

::ngraph::op::GenericIE::DisableReshape noReshape(func);
auto specialized_function = ngraph::clone_function(*func);

if (constFolding) {

@@ -286,8 +284,6 @@ std::shared_ptr<ngraph::Function> CNNNetworkNGraphImpl::cloneFunction(bool const
}

void CNNNetworkNGraphImpl::reshape() {
// Disable reshape for generic nodes
::ngraph::op::GenericIE::DisableReshape noReshape(_ngraph_function);
reshape({});
}

@@ -1,115 +0,0 @@
// Copyright (C) 2017-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "generic_ie.hpp"

#include <ie_blob.h>

#include <algorithm>
#include <ie_parameter.hpp>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "blob_factory.hpp"
#include <ie_ngraph_utils.hpp>
#include "ngraph/util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/validation_util.hpp"

constexpr ::ngraph::NodeTypeInfo ngraph::op::GenericIE::type_info;

void ngraph::op::GenericIE::addExtension(std::shared_ptr<const ngraph::Function> func,
const InferenceEngine::IShapeInferExtensionPtr& ext) {
NodeVector nodes;

for (auto r : func->get_results())
nodes.emplace_back(r);
for (auto s : func->get_sinks())
nodes.emplace_back(s);
for (auto param : func->get_parameters())
nodes.emplace_back(param);

traverse_nodes(nodes, [&](std::shared_ptr<Node> op) {
if (auto generic = std::dynamic_pointer_cast<GenericIE>(op)) {
generic->addExtension(ext);
}
if (auto ti = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(op)) {
addExtension(ti->get_body(), ext);
}
});
}

void ngraph::op::GenericIE::addExtension(const InferenceEngine::IShapeInferExtensionPtr& ext) {
extensions.emplace_back(ext);
}

std::vector<InferenceEngine::IShapeInferExtensionPtr> ngraph::op::GenericIE::getExtensions(std::shared_ptr<const ngraph::Function> func) {
for (auto& op : func->get_ops()) {
if (auto generic = std::dynamic_pointer_cast<GenericIE>(op)) {
return generic->getExtensions();
}
}
return {};
}

std::vector<InferenceEngine::IShapeInferExtensionPtr> ngraph::op::GenericIE::getExtensions() {
return extensions;
}

ngraph::op::GenericIE::GenericIE(const ngraph::OutputVector& inputs,
const std::map<std::string, InferenceEngine::Parameter>& params_,
const std::string type_, const std::vector<PortIE>& outputs_)
: Op(inputs), params(params_), outputs(outputs_), type(type_), initialized(0) {
constructor_validate_and_infer_types();
}

std::shared_ptr<ngraph::Node> ngraph::op::GenericIE::clone_with_new_inputs(const ngraph::OutputVector& new_args) const {
auto genNode = std::make_shared<GenericIE>(new_args, params, type, outputs);
genNode->extensions = extensions;
genNode->reshape = reshape;
return genNode;
}

void ngraph::op::GenericIE::validate_and_infer_types() {
// This function returns precision based on existing precision and
// precision that was set in outputs vector
auto get_precision = [this](const size_t index) -> element::Type {
if (index >= get_output_size() ||
get_output_element_type(index) == element::dynamic ||
get_output_element_type(index) == element::undefined) {
return InferenceEngine::details::convertPrecision(outputs[index].precision);
}
return get_output_element_type(index);
};

// Extensions are not loaded when we create nGraph function
// First call: create node
if (initialized < 1) {
if (outputs.size())
set_output_size(outputs.size());
for (size_t output_index = 0; output_index < outputs.size(); output_index++) {
set_output_type(output_index, get_precision(output_index), Shape(outputs[output_index].dims));
}
initialized++;
} else if (reshape) {
THROW_IE_EXCEPTION << "IShapeInferExtension wasn't registered for node " << get_friendly_name()
<< " with type " << type;
}
}

bool ngraph::op::GenericIE::visit_attributes(ngraph::AttributeVisitor& visitor) {
for (const auto& p : params) {
std::string name = p.first;
std::string value = p.second;
visitor.on_attribute(name, value);
}
// This is a way to pass type name to transformations::Serialize() without
// adding plugin_api dependency on transformation library
std::string name = "__generic_ie_type__";
std::string value = getType();
visitor.on_attribute(name, value);
return true;
}

@@ -17,7 +17,6 @@
#include <ngraph/pass/manager.hpp>
#include <ie_common.h>

#include "generic_ie.hpp"
#include "cnn_network_ngraph_impl.hpp"
#include <transformations/init_node_info.hpp>
#include <transformations/common_optimizations/common_optimizations.hpp>

@@ -94,8 +93,6 @@ CNNNetworkImpl::CNNNetworkImpl(const ICNNNetwork & ngraphImpl) {
IE_ASSERT(ngraphImplPtr != nullptr);
IE_ASSERT(ngraphImplPtr->getFunction() != nullptr);
auto graph = ngraph::clone_function(*ngraphImpl.getFunction());
// Disable shape inference (WA for generic operations)
::ngraph::op::GenericIE::DisableReshape noReshape(graph);

::ngraph::pass::Manager manager;
manager.register_pass<::ngraph::pass::InitNodeInfo>();

@@ -39,7 +39,6 @@
#include "legacy/ngraph_ops/rnn_sequence_ie.hpp"
#include "legacy/ngraph_ops/lstm_sequence_ie.hpp"
#include "legacy/ngraph_ops/gru_sequence_ie.hpp"
#include "generic_ie.hpp"
#include "exec_graph_info.hpp"

#include "caseless.hpp"

@@ -1710,36 +1709,6 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
return res;
});

addSpecificCreator({"GenericIE"}, [](const std::shared_ptr<::ngraph::Node> &node,
const std::map<std::string, std::string> &params) -> CNNLayerPtr {
auto type = params.at("__generic_ie_type__");
auto castedLayer = ngraph::as_type_ptr<ngraph::op::GenericIE>(node);
LayerParams attrs = {node->get_friendly_name(), type, details::convertPrecision(node->get_output_element_type(0))};
auto res = std::make_shared<InferenceEngine::CNNLayer>(attrs);
if (type == "RNNCell") {
res = std::make_shared<InferenceEngine::RNNCell>(attrs);
}
if (type == "GRUCell") {
res = std::make_shared<InferenceEngine::GRUCell>(attrs);
}

auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(res);
for (const auto& param : castedLayer->getParameters()) {
if (param.second.is<Blob::Ptr>()) {
res->blobs[param.first] = param.second.as<Blob::Ptr>();
} else if (param.second.is<Blob::CPtr>()) {
res->blobs[param.first] = std::const_pointer_cast<Blob>(param.second.as<Blob::CPtr>());
} else if (param.second.is<std::string>()) {
res->params[param.first] = param.second.as<std::string>();
}
if (weightableLayer && param.first == "weights")
weightableLayer->_weights = res->blobs[param.first];
if (weightableLayer && param.first == "biases")
weightableLayer->_biases = res->blobs[param.first];
}
return res;
});

addSpecificCreator({"ShuffleChannels"}, [](const std::shared_ptr<::ngraph::Node>& node,
const std::map<std::string, std::string>& params) -> CNNLayerPtr {
LayerParams attrs = {node->get_friendly_name(), "ShuffleChannels", details::convertPrecision(node->get_output_element_type(0))};

@@ -8,8 +8,8 @@
#include <ie_ngraph_utils.hpp>
#include "exec_graph_info.hpp"
#include "mkldnn_debug.h"
#include "generic_ie.hpp"
#include <ngraph/variant.hpp>
#include "ngraph/ngraph.hpp"

#include <vector>
#include <string>

@@ -136,7 +136,6 @@ InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph
holder->add_control_dependency(node);
}

ngraph::op::GenericIE::DisableReshape reshape(nodes);
auto function = std::make_shared<ngraph::Function>(results, params, graph._name);
InferenceEngine::CNNNetwork net(function);
return net;

@@ -15,7 +15,6 @@
#include <vector>
#include <tuple>
#include <ie_system_conf.h>
#include <generic_ie.hpp>
#include <nodes/list.hpp>
#include <legacy/ie_util_internal.hpp>
#include <legacy/graph_transformer.h>

@@ -106,8 +105,6 @@ Engine::~Engine() {

static void Transformation(CNNNetwork& clonedNetwork, const Config& conf) {
auto nGraphFunc = clonedNetwork.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitNodeInfo>();

@@ -1,137 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <vector>
#include <string>
#include <map>

#include <ie_parameter.hpp>
#include <ie_precision.hpp>

#include <ngraph/op/op.hpp>
#include <ngraph/op/tensor_iterator.hpp>
#include <ngraph/graph_util.hpp>

namespace InferenceEngine {

class IShapeInferExtension;
using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;

} // namespace InferenceEngine

namespace ngraph {
namespace op {

/**
* This generic operation is necessary for legacy scenario.
* If user has old shape inference extensions, this node allow to use it for shape inference
*/
class INFERENCE_ENGINE_API_CLASS(GenericIE) : public Op {
public:
struct PortIE {
InferenceEngine::Precision precision;
std::vector<size_t> dims;
};

class DisableReshape {
public:
explicit DisableReshape(std::vector<std::shared_ptr<ngraph::Node>>& ops) {
for (auto& op : ops) {
addOp(op);
}
}
explicit DisableReshape(const std::shared_ptr<const ngraph::Function>& graph) {
IE_ASSERT(graph);

for (auto& op : graph->get_ops()) {
addOp(op);
}
}

~DisableReshape() {
for (auto& generic : genericOps) {
generic->doReshape(true);
}
}

private:
std::vector<std::shared_ptr<ngraph::op::GenericIE>> genericOps;

void addOp(std::shared_ptr<ngraph::Node>& op) {
if (auto generic = std::dynamic_pointer_cast<GenericIE>(op)) {
generic->doReshape(false);
genericOps.emplace_back(generic);
}
if (auto ti_node = std::dynamic_pointer_cast<ngraph::op::TensorIterator>(op)) {
auto results = ti_node->get_body()->get_results();
auto ti_params = ti_node->get_body()->get_parameters();
ngraph::NodeVector nResults, nParams;
for (const auto& res : results)
nResults.emplace_back(res);
for (const auto& param : ti_params)
nParams.emplace_back(param);
ngraph::traverse_nodes(nResults, [&](std::shared_ptr<ngraph::Node> node) {
if (auto genNode = std::dynamic_pointer_cast<ngraph::op::GenericIE>(node)) {
genNode->doReshape(false);
genericOps.emplace_back(genNode);
}
}, nParams);
}
}
};

static constexpr NodeTypeInfo type_info{"GenericIE", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }

/**
* @brief constructor of generic node
*
* @param inputs vector of inputs
* @param params map of parameters (std::string, Blob::Ptr, Blob::CPtr)
* @param type string with original layer type
* @param outputs information about output ports from IR
*/
GenericIE(const OutputVector& inputs,
const std::map<std::string, InferenceEngine::Parameter>& params,
const std::string type,
const std::vector<PortIE>& outputs);

void validate_and_infer_types() override;

std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

bool visit_attributes(ngraph::AttributeVisitor& visitor) override;

static void addExtension(std::shared_ptr<const ngraph::Function> func, const InferenceEngine::IShapeInferExtensionPtr& ext);
static std::vector<InferenceEngine::IShapeInferExtensionPtr> getExtensions(std::shared_ptr<const ngraph::Function> func);

const std::string& getType() const {
return type;
}

const std::map<std::string, InferenceEngine::Parameter>& getParameters() const {
return params;
}

private:
void doReshape(bool flag) {
reshape = flag;
}

std::vector<InferenceEngine::IShapeInferExtensionPtr> extensions;
bool reshape = true;
std::map<std::string, InferenceEngine::Parameter> params;
std::vector<PortIE> outputs;
std::string type;
int initialized;

void addExtension(const InferenceEngine::IShapeInferExtensionPtr& ext);
std::vector<InferenceEngine::IShapeInferExtensionPtr> getExtensions();
};

} // namespace op
} // namespace ngraph

@@ -29,7 +29,6 @@
#include "ie_blob_stream.hpp"
#include "caseless.hpp"
#include <ie_ngraph_utils.hpp>
#include "generic_ie.hpp"
#include "precision_utils.h"
#include "blob_factory.hpp"

@@ -367,8 +366,6 @@ void V10Parser::XmlDeserializer::on_adapter(const std::string& name, ngraph::Val
} else {
THROW_IE_EXCEPTION << "Error: not recognized adapter name: " << name << ".";
}
// Disabled reshape for generic operations in the TI body
ngraph::op::GenericIE::DisableReshape noReshape(ngraph_function);
adapter.set(ngraph_function);
}

@@ -490,7 +487,6 @@ std::shared_ptr<ngraph::Function> V10Parser::XmlDeserializer::parse_function(con

OV_ITT_TASK_NEXT(taskChain, "ConstructNgraphFunction");

::ngraph::op::GenericIE::DisableReshape noReshape(allNodes);
auto function = std::make_shared<ngraph::Function>(result_nodes, sink_nodes, parameter_nodes, GetStrAttr(root, "name", ""));
for (const auto& sink : sink_nodes) {
if (const auto& assign = std::dynamic_pointer_cast<ngraph::op::AssignBase>(sink)) {

@@ -764,7 +760,7 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(

std::shared_ptr<ngraph::Node> ngraphNode;

// Find registerd opset
// Find registered opset
auto opsetIt = opsets.find(params.version);

// Try to create operation from loaded opsets

@@ -818,52 +814,6 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
ngraphNode = ngraphNode->clone_with_new_inputs(ngraphNode->input_values());
}

// Create GenericIE operation for backward compatibility
if (!ngraphNode && (params.version == "experimental" || params.version == "extension")) {
// Try to create Generic node for backward compatibility
std::map<std::string, Parameter> parameters;
pugi::xml_node dn = node.child("data");
if (dn) {
for (const auto& attr : dn.attributes()) {
parameters[attr.name()] = std::string(attr.value());
}
}

auto blobs = node.child("blobs");
if (!blobs.empty()) {
size_t length = weights->byteSize();

for (pugi::xml_node blob = blobs.first_child(); !blob.empty(); blob = blob.next_sibling()) {
size_t size = GetUInt64Attr(blob, "size", 0);
uint64_t offset = GetUInt64Attr(blob, "offset", 0);
Precision precision(Precision::U8);
const std::string& preStr = GetStrAttr(blob, "precision", "");
if (!preStr.empty())
precision = Precision::FromStr(preStr);
if (!size) continue;
if (!length)
THROW_IE_EXCEPTION << "Cannot read network! The model requires weights data! "
<< "Bin file cannot be found! Please specify the path to bin file.";
if (static_cast<uint64_t>(length) < offset + size)
THROW_IE_EXCEPTION << "Cannot create " << params.type << " layer with name: " << params.name
<< ". Layer has incorrect weights!";
uint8_t* data = weights->cbuffer().as<uint8_t*>() + offset;
Blob::Ptr wBlob = make_shared_blob<uint8_t>({Precision::U8, { size / precision.size() }, C }, data);

parameters[blob.name()] = wBlob;
}
}
std::vector<ngraph::op::GenericIE::PortIE> outputs;
for (const auto& port : params.outputPorts) {
ngraph::op::GenericIE::PortIE iePort;
iePort.dims = port.dims;
iePort.precision = InferenceEngine::details::convertPrecision(port.precision);
outputs.emplace_back(iePort);
}

ngraphNode = std::make_shared<ngraph::op::GenericIE>(inputs, parameters, params.type, outputs);
}

if (!ngraphNode) {
THROW_IE_EXCEPTION << "Cannot create " << params.type << " layer " << params.name << " id:" << params.layerId
<< " from unsupported opset: " << params.version;

@@ -68,7 +68,6 @@ private:
struct GenericLayerParams {
struct LayerPortData {
size_t portId;
// Precision and dimensions are needed only for GenericIE op
ngraph::element::Type_t precision;
SizeVector dims;
std::unordered_set<std::string> names;

@@ -22,7 +22,6 @@ class TRANSFORMATIONS_API Serialize;
* @brief Serialize transformation converts ngraph::Function into IR files
* @attention
* - dynamic shapes are not supported
* - GenericIE operation type (experimental opset) is not supported
* - order of generated layers in xml file is ngraph specific (given by
* get_ordered_ops()); MO generates file with different order, but they are
* logically equivalent

@@ -29,7 +29,6 @@ bool fuse_type_to_nms5(std::shared_ptr<ngraph::Node> & node, ngraph::element::Ty
bool fuse_type_to_topk(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_nonzero(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_bucketize(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx);
bool fuse_type_to_generic_ie(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx);

bool extend_select_type(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx);

@@ -91,7 +90,6 @@ bool ngraph::pass::ConvertPrecision::run_on_function(std::shared_ptr<ngraph::Fun
{opset4::TopK::type_info, fuse_type_to_topk},
{opset4::NonZero::type_info, fuse_type_to_nonzero},
{opset4::Bucketize::type_info, fuse_type_to_bucketize},
{NodeTypeInfo("GenericIE", 1), fuse_type_to_generic_ie},
{opset4::Equal::type_info, fuse_type_to_binary_comparision<opset4::Equal>},
{opset4::NotEqual::type_info, fuse_type_to_binary_comparision<opset4::NotEqual>},
{opset4::Greater::type_info, fuse_type_to_binary_comparision<opset4::Greater>},

@@ -282,12 +280,6 @@ bool fuse_type_to_bucketize(std::shared_ptr<ngraph::Node> & node, ngraph::elemen
return false;
}

bool fuse_type_to_generic_ie(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx) {
node->set_output_type(idx, to, node->output(idx).get_partial_shape());
// return false as we do not replace original node
return false;
}

bool fuse_type_to_shapeof_v0(std::shared_ptr<ngraph::Node> & node, ngraph::element::Type to, size_t idx) {
if (auto type_relaxed = std::dynamic_pointer_cast<op::TypeRelaxedBase>(node)) {
type_relaxed->set_overridden_output_type(to);

@@ -267,17 +267,9 @@ public:
}
void on_adapter(const std::string& name,
ngraph::ValueAccessor<std::string>& adapter) override {
if ((m_node_type_name == "GenericIE") &&
(name == "__generic_ie_type__")) {
// __generic_ie_type__ in GenericIE should not be serialized as a
// <data> since it's purpose is to hold name of the layer type
// it is a WA to not introduce dependency on plugin_api library
m_node_type_name = adapter.get();
} else {
m_xml_node.append_attribute(name.c_str())
.set_value(adapter.get().c_str());
}
}
void on_adapter(const std::string& name,
ngraph::ValueAccessor<int64_t>& adapter) override {
m_xml_node.append_attribute(name.c_str()).set_value(adapter.get());

@@ -20,7 +20,6 @@
#include <string>

#include <legacy/convert_function_to_cnn_network.hpp>
#include <generic_ie.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset4.hpp>

@@ -164,8 +163,6 @@ ModelPtr FrontEnd::buildInitialModel(const ie::CNNNetwork& network) {

ie::CNNNetwork FrontEnd::convertNetwork(ie::CNNNetwork& network) {
auto nGraphFunc = network.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

ngraph::pass::Manager manager;
manager.register_pass<::ngraph::pass::InitNodeInfo>();

@@ -4,12 +4,11 @@

#include "vpu/utils/runtime_graph.hpp"

#include "generic_ie.hpp"

#include <legacy/ie_util_internal.hpp>
#include <ie_ngraph_utils.hpp>
#include <exec_graph_info.hpp>
#include <ngraph/variant.hpp>
#include <ngraph/ngraph.hpp>

#include <vector>
#include <map>

@@ -21,8 +21,6 @@
#include <transformations/common_optimizations/common_optimizations.hpp>
#include <ngraph/pass/manager.hpp>

#include "generic_ie.hpp"

#include "myriad_plugin.h"

using namespace InferenceEngine;

@@ -206,13 +206,11 @@ endif()
#

ie_headers_compilation_with_custom_flags(TEST_SUFFIX PluginApiCxx17
HEADERS_TO_SKIP "generic_ie.hpp"
CXX_STANDARD 17 PLUGIN_API)

if(UNIX)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
ie_headers_compilation_with_custom_flags(TEST_SUFFIX PluginApiWarningsAreErrors
HEADERS_TO_SKIP "generic_ie.hpp"
FLAGS "-Werror-all -Werror -Wall"
PLUGIN_API)
else()

@@ -17,7 +17,6 @@
#include <ie_parameter.hpp>
#include <ie_core.hpp>
#include <legacy/net_pass.h>
#include <generic_ie.hpp>
#include <legacy/convert_function_to_cnn_network.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <ngraph/pass/manager.hpp>

@@ -1438,8 +1437,6 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOp) {
}

auto nGraphFunc = network.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

ngraph::pass::Manager manager;

@@ -1630,8 +1627,6 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOpOpset6) {
}

auto nGraphFunc = network.getFunction();
// Disable shape inference (WA for generic operations)
ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

ngraph::pass::Manager manager;

@@ -20,7 +20,6 @@
#include <legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <transformations/opset_conversions/convert_opset2_to_opset1.hpp>
#include <transformations/opset_conversions/convert_opset3_to_opset2.hpp>
#include "generic_ie.hpp"
#include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp"

using namespace testing;

@@ -167,7 +167,7 @@ TEST(NetReaderTest, IRSupportModelDetection) {
</port>
</output>
</layer>
<layer name="Abs" id="1" type="Abs" version="experimental">
<layer name="Abs" id="1" type="Abs" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>

@@ -17,7 +17,6 @@
#include <ngraph/pass/manager.hpp>

#include "common_test_utils/ngraph_test_utils.hpp"
#include "generic_ie.hpp"

#include "legacy/convert_function_to_cnn_network.hpp"

@@ -3,7 +3,6 @@
//

#include <string>
#include <generic_ie.hpp>
#include "ngraph/opsets/opset6.hpp"
#include "ngraph_reader_tests.hpp"
TEST_F(NGraphReaderTests, ReadProposalNetwork) {

@@ -30,7 +30,6 @@
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/file_utils.hpp"
#include "common_test_utils/common_utils.hpp"
#include "generic_ie.hpp"

IE_SUPPRESS_DEPRECATED_START

@@ -10,7 +10,6 @@
#include <string>

#include <ie_core.hpp>
#include "generic_ie.hpp"

#include <legacy/net_pass.h>
#include <legacy/graph_transformer.h>

@@ -79,8 +78,6 @@ InferenceEngine::CNNNetwork convert(std::shared_ptr<ngraph::Function> function)
std::dynamic_pointer_cast<const ::ngraph::opset3::ShuffleChannels>(node);
};
auto nGraphFunc = clonedNetwork.getFunction();
// Disable shape inference (WA for generic operations)
::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

// Note: instead of running all Conversion Transformations you can make up your own transformation pipeline
ngraph::pass::Manager manager;

@@ -10,7 +10,6 @@
#include <string>

#include <ie_core.hpp>
#include "generic_ie.hpp"

#include <legacy/net_pass.h>
#include <legacy/graph_transformer.h>

@@ -112,8 +111,6 @@ InferenceEngine::CNNNetwork convert(std::shared_ptr<ngraph::Function> function)
std::dynamic_pointer_cast<const ::ngraph::opset4::SoftPlus>(node);
};
auto nGraphFunc = clonedNetwork.getFunction();
// Disable shape inference (WA for generic operations)
::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);

// Note: instead of running all Conversion Transformations you can make up your own transformation pipeline
ngraph::pass::Manager manager;

@@ -23,55 +23,6 @@ struct extension_params {
std::map<std::string, std::string> config;
};

class FakePrimitiveImpl : public InferenceEngine::ILayerExecImpl {
public:
FakePrimitiveImpl(const InferenceEngine::CNNLayer *layer) {
cnnLayer = const_cast<InferenceEngine::CNNLayer *>(layer);
}
InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc *resp) noexcept override {
InferenceEngine::LayerConfig config;
config.dynBatchSupport = true;
if (cnnLayer->outData.size() != 1 && cnnLayer->insData.size() != 1)
return InferenceEngine::GENERAL_ERROR;
InferenceEngine::DataConfig cfg;
cfg.constant = false;
cfg.inPlace = 0;
InferenceEngine::SizeVector order;
for(size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) {
order.push_back(i);
}
cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(),
cnnLayer->outData[0]->getTensorDesc().getDims(),
{cnnLayer->outData[0]->getTensorDesc().getDims(), order});
config.outConfs.push_back(cfg);
config.inConfs.push_back(cfg);
conf.push_back(config);
return InferenceEngine::OK;
}
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override {
return InferenceEngine::OK;
}
InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs, InferenceEngine::ResponseDesc *resp) noexcept override {
return InferenceEngine::OK;
}

private:
InferenceEngine::CNNLayer* cnnLayer;
};

class TestExtension : public InferenceEngine::IExtension {
public:
void Release() noexcept override { delete this; }

void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override
{
static const InferenceEngine::Version VERSION{{}, "", ""};
versionInfo = &VERSION;
}

void Unload() noexcept override {}
};

class NewFakePrimitiveImpl : public InferenceEngine::ILayerExecImpl {
public:
NewFakePrimitiveImpl(const std::shared_ptr<ngraph::Node>& node): node(node) {}

@@ -179,7 +130,7 @@ public:
if (opsets.empty()) {
ngraph::OpSet opset;
opset.insert<FakeTestOp>();
opsets["experimental"] = opset;
opsets["custom_opset"] = opset;
}
return opsets;
}

@@ -248,7 +199,7 @@ protected:
</port>
</output>
</layer>
<layer name="fake_layer" id="1" type="Fake" version="experimental" precision="FP32">
<layer name="fake_layer" id="1" type="Fake" version="custom_opset" precision="FP32">
<input>
<port id="1">
<dim>1</dim>

@@ -291,20 +242,14 @@ protected:

Blob::Ptr weights;
CNNNetwork cnnNet1 = ie.ReadNetwork(model, weights);
CNNNetwork cnnNet2 = ie2.ReadNetwork(model, weights);
ASSERT_NO_THROW(ie.LoadNetwork(cnnNet1, device));
ASSERT_THROW(ie2.LoadNetwork(cnnNet2, device), details::InferenceEngineException);
ASSERT_THROW(ie2.ReadNetwork(model, weights), details::InferenceEngineException);
} catch (const InferenceEngine::details::InferenceEngineException& e) {
FAIL() << e.what();
}
}
};

/*************************************************
* !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
* All ref values was obtained from Caffe scoring
* !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
*************************************************/
#ifndef ENABLE_MKL_DNN
#include "disable_tests.hpp"
#endif