Remove deprecated methods from node (#1369)
commit 82aa1e112d
parent cdd5605c61
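The diff below is a mechanical migration away from the deprecated descriptor-based accessors (get_inputs(), get_outputs(), get_output_tensor(), get_output_tensor_ptr()) to the Input<Node>/Output<Node> API. As orientation, here is a minimal sketch of the before/after pattern, assuming the ngraph API used in this repository at the time of the commit; the helper names producer_of and consumers_of are illustrative, not part of the change.

#include <memory>
#include <set>

#include <ngraph/ngraph.hpp>

// Producer of input i.
// Old style (removed by this commit): node->get_inputs()[i].get_output().get_node()
// New style: go through Output<Node>.
std::shared_ptr<ngraph::Node> producer_of(const std::shared_ptr<ngraph::Node>& node, size_t i) {
    // Equivalent to node->input(i).get_source_output().get_node_shared_ptr()
    return node->input_value(i).get_node_shared_ptr();
}

// Consumers of output i.
// Old style: iterate node->get_outputs()[i].get_inputs()
// New style: ask the Output<Node> for its target inputs.
std::set<ngraph::Input<ngraph::Node>> consumers_of(const std::shared_ptr<ngraph::Node>& node, size_t i) {
    return node->output(i).get_target_inputs();
}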
@@ -352,11 +352,11 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
     std::unordered_set<std::string> opName;
     for (const auto & layer : specialized_ngraph_function->get_ordered_ops()) {
         if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
-            IE_ASSERT(layer->get_inputs().size() == 1);
-            const auto& input = layer->get_inputs()[0];
-            std::string outName = input.get_output().get_node()->get_friendly_name();
-            if (input.get_output().get_node()->get_output_size() != 1)
-                outName += "." + std::to_string(input.get_output().get_index());
+            IE_ASSERT(layer->inputs().size() == 1);
+            const auto& output = layer->input(0).get_source_output();
+            std::string outName = output.get_node()->get_friendly_name();
+            if (output.get_node()->get_output_size() != 1)
+                outName += "." + std::to_string(output.get_index());
             addOutput(outName);
             continue;
         }

@@ -325,7 +325,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
 
     Builder::NodeConverter<::ngraph::op::Constant> converter;
 
-    const auto weightsNode = castedLayer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
     if (converter.canCreate(weightsNode)) {
         const auto& weights = converter.createLayer(weightsNode);
         res->blobs["weights"] = weights->blobs["custom"];
@@ -351,7 +351,7 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
     res->params = params;
     return res;
 });
 
 addSpecificCreator({"Assign"}, [](const std::shared_ptr<::ngraph::Node>& node,
                                   const std::map<std::string, std::string> params) -> CNNLayerPtr {
     LayerParams attrs = {node->get_friendly_name(), "Memory",
@@ -706,8 +706,8 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
                                   const std::unordered_set<std::string> &names,
                                   bool keep_constant) -> bool {
     if (auto constantNode = ::ngraph::as_type_ptr<::ngraph::op::Constant>(node)) {
-        for (const auto &consumerInputPort : constantNode->get_outputs()[0].get_inputs()) {
-            const auto &consumerLayer = consumerInputPort->get_node();
+        for (const auto &consumerInputPort : constantNode->output(0).get_target_inputs()) {
+            const auto &consumerLayer = consumerInputPort.get_node()->shared_from_this();
             if (names.find(consumerLayer->get_name()) == names.end())
                 continue;
             if (!isInternalConstLayer(constantNode, consumerLayer, keep_constant))
@@ -779,7 +779,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
 
     size_t inputCount(0);
     for (size_t i = 0; i < layer->get_input_size(); i++) {
-        const auto &constant = ngraph::as_type_ptr<ngraph::op::Constant>(layer->get_inputs()[i].get_output().get_node());
+        const auto &constant = ngraph::as_type_ptr<ngraph::op::Constant>(layer->input(i).get_source_output().get_node_shared_ptr());
         if (constant && isInternalConstLayer(constant, layer, keep_constants)) {
             continue;
         }
@@ -855,7 +855,7 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
     if (std::dynamic_pointer_cast<::ngraph::op::ReadValue>(layer))
         continue;
     if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
-        IE_ASSERT(layer->get_inputs().size() == 1);
+        IE_ASSERT(layer->inputs().size() == 1);
         const auto &input = layer->input_value(0);
         std::string outName = input.get_node_shared_ptr()->get_friendly_name();
         if (input.get_node_shared_ptr()->get_output_size() != 1)

@@ -261,7 +261,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::TensorIterator>::createLayer(const std::
 
     // Port map: outputs
     for (const auto& desc : tensor_iterator->get_output_descriptions()) {
-        auto result = results[desc->m_body_value_index]->inputs()[0].get_source_output();
+        auto result = results[desc->m_body_value_index]->input(0).get_source_output();
 
         // GetOutputElement layer can be inserted by ngraph deep copy functions
         // (e.g. specialize_function, clone_function)
@@ -785,7 +785,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ConvolutionIE>::createLayer(
     res->_weights = weights->blobs["custom"];
 
     if (castedLayer->inputs().size() == 3) {
-        const auto biasNode = castedLayer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr();
         if (converter.canCreate(biasNode)) {
             const auto& bias = converter.createLayer(biasNode);
             res->blobs["biases"] = bias->blobs["custom"];
@@ -853,7 +853,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::DeconvolutionIE>::createLayer(
     res->_weights = weights->blobs["custom"];
 
     if (castedLayer->inputs().size() == 3) {
-        const auto biasNode = castedLayer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = castedLayer->input_value(2).get_node_shared_ptr();
         if (converter.canCreate(biasNode)) {
             const auto& bias = converter.createLayer(biasNode);
             res->blobs["biases"] = bias->blobs["custom"];
@@ -1137,7 +1137,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::PRelu>::createLayer(const std::shared_pt
     auto castedLayer = ngraph::as_type_ptr<ngraph::op::PRelu>(layer);
     if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
 
-    const auto weightsNode = castedLayer->input(1).get_source_output().get_node_shared_ptr();
+    const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
     if (auto const_weights = ngraph::as_type_ptr<ngraph::op::Constant>(weightsNode)) {
         SizeVector dataShape = const_weights->get_shape();
         if (dataShape.size() >= 2 && ngraph::shape_size(dataShape) == dataShape[1]) {
@@ -1282,7 +1282,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::v1::Reshape>::createLayer(const std::sha
         THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
 
 
-    const auto constNode = castedLayer->get_inputs()[1].get_output().get_node();
+    const auto constNode = castedLayer->input_value(1).get_node_shared_ptr();
     if (auto constValue = ngraph::as_type_ptr<ngraph::op::Constant>(constNode)) {
         auto value = constValue->cast_vector<int64_t>();
         for (auto & i : value) {
@@ -1344,14 +1344,14 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ScaleShiftIE>::createLayer(const std::sh
     auto res = std::make_shared<InferenceEngine::ScaleShiftLayer>(params);
 
     NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = layer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
     if (converter.canCreate(weightsNode)) {
         const auto& weightsLayer = converter.createLayer(weightsNode);
         res->blobs["weights"] = weightsLayer->blobs["custom"];
         res->_weights = weightsLayer->blobs["custom"];
     }
 
-    const auto biasNode = layer->get_inputs()[2].get_output().get_node();
+    const auto biasNode = layer->input_value(2).get_node_shared_ptr();
     if (converter.canCreate(biasNode)) {
         const auto& bias = converter.createLayer(biasNode);
         res->blobs["biases"] = bias->blobs["custom"];
@@ -1641,13 +1641,13 @@ CNNLayer::Ptr NodeConverter<ngraph::op::FullyConnected>::createLayer(const std::
 
     NodeConverter<ngraph::op::Constant> converter;
 
-    const auto weightsNode = layer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = layer->input_value(1).get_node_shared_ptr();
     if (!keep_constants && converter.canCreate(weightsNode)) {
         const auto& weights = converter.createLayer(weightsNode);
         res->blobs["weights"] = weights->blobs["custom"];
         res->_weights = weights->blobs["custom"];
 
-        const auto biasNode = layer->get_inputs()[2].get_output().get_node();
+        const auto biasNode = layer->input_value(2).get_node_shared_ptr();
         if (converter.canCreate(biasNode)) {
             const auto& bias = converter.createLayer(biasNode);
             res->blobs["biases"] = bias->blobs["custom"];
@@ -1694,14 +1694,14 @@ CNNLayer::Ptr NodeConverter<ngraph::op::LSTMCellIE>::createLayer(const std::shar
     res->params["clip"] = asString(castedLayer->get_clip());
 
     NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = layer->get_inputs()[3].get_output().get_node();
+    const auto weightsNode = layer->input_value(3).get_node_shared_ptr();
     if (converter.canCreate(weightsNode)) {
         const auto& weights = converter.createLayer(weightsNode);
         res->blobs["weights"] = weights->blobs["custom"];
         res->_weights = weights->blobs["custom"];
     }
 
-    const auto biasNode = layer->get_inputs()[4].get_output().get_node();
+    const auto biasNode = layer->input_value(4).get_node_shared_ptr();
     if (converter.canCreate(biasNode)) {
         const auto& bias = converter.createLayer(biasNode);
         res->blobs["biases"] = bias->blobs["custom"];
@@ -1862,7 +1862,7 @@ CNNLayer::Ptr NodeConverter<ngraph::op::NormalizeIE>::createLayer(const std::sha
     res->params["across_spatial"] = castedLayer->get_across_spatial() ? "1" : "0";
 
     NodeConverter<ngraph::op::Constant> converter;
-    const auto weightsNode = castedLayer->get_inputs()[1].get_output().get_node();
+    const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
     if (converter.canCreate(weightsNode)) {
         const auto& weights = converter.createLayer(weightsNode);
         res->blobs["weights"] = weights->blobs["custom"];

@@ -24,8 +24,8 @@ ngraph::graph_rewrite_callback callback = [](ngraph::pattern::Matcher& m) {
         return false;
     }
 
-    for (const auto& input : eltwise_node->get_inputs()) {
-        const auto& inputLayer = input.get_output().get_node();
+    for (const auto& input : eltwise_node->inputs()) {
+        const auto& inputLayer = input.get_source_output().get_node_shared_ptr();
         auto const_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(inputLayer);
         if (!const_node) continue;
 

@@ -28,9 +28,9 @@ void ngraph::pass::ConvertOneHotToOneHotIE::convert_one_hot() {
 
         element::Type output_type = is_f16 ? element::f16 : element::f32;
 
-        const auto depth_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[1].get_output().get_node());
-        const auto on_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[2].get_output().get_node());
-        const auto off_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[3].get_output().get_node());
+        const auto depth_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(1).get_source_output().get_node_shared_ptr());
+        const auto on_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(2).get_source_output().get_node_shared_ptr());
+        const auto off_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->input(3).get_source_output().get_node_shared_ptr());
 
         // can be converted iff inputs with depth, on/off values are constants
         if (depth_node == nullptr || on_value_node == nullptr || off_value_node == nullptr) return false;

@@ -142,10 +142,10 @@ void FrontEnd::detectNetworkBatch(
         VPU_THROW_FORMAT("Unsupported layer %s configuration: no outputs", layer->get_name());
 
     // 1. Don't support if DetectionOutput is not the last layer in network
-    for (const auto& outputHandle : layer->get_outputs()) {
-        for (const auto& inputHandle : outputHandle.get_inputs()) {
-            auto outNode = inputHandle->get_node();
-            if (std::dynamic_pointer_cast<::ngraph::opset3::Result>(outNode)) {
+    for (const auto& outputHandle : layer->outputs()) {
+        for (const auto& inputHandle : outputHandle.get_target_inputs()) {
+            auto outNode = inputHandle.get_node();
+            if (dynamic_cast<::ngraph::opset3::Result *>(outNode)) {
                 continue;
             }
             VPU_THROW_FORMAT("Unsupported layer %s configuration : it is not a network output", layer->get_name());

@@ -91,7 +91,9 @@ namespace FuncTestUtils {
 
     if (layer->blobs.size() != refLayer->blobs.size()) {
         err_log.push_back(
-            "Layer " + layer->name + " and ref layer " + refLayer->name + " have different number of blobs: " +
+            "Layer " + layer->type + " with name " + layer->name +
+            " and ref layer " + layer->type + " with name " + refLayer->name +
+            " have different number of blobs: " +
             std::to_string(layer->blobs.size()) + " and " + std::to_string(refLayer->blobs.size()));
     }
 

@@ -172,7 +172,7 @@ void Node::set_arguments(const OutputVector& arguments)
     for (auto& output : arguments)
     {
         auto output_node = output.get_node();
-        auto& output_descriptor = output_node->get_outputs().at(output.get_index());
+        auto& output_descriptor = output_node->m_outputs.at(output.get_index());
         m_inputs.emplace_back(this, i++, output_descriptor);
     }
 }
@@ -262,16 +262,6 @@ void Node::set_output_type(size_t i, const element::Type& element_type, const Pa
     get_output_descriptor(i).get_tensor_ptr()->set_tensor_type(element_type, pshape);
 }
 
-std::deque<descriptor::Output>& Node::get_outputs()
-{
-    return m_outputs;
-}
-
-const std::deque<descriptor::Output>& Node::get_outputs() const
-{
-    return m_outputs;
-}
-
 const std::string& Node::description() const
 {
     // Terrible transitional kludge to keep description working while we change
@@ -638,23 +628,6 @@ const Shape& Node::get_shape() const
     return get_output_shape(0);
 }
 
-shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr(size_t i) const
-{
-    NGRAPH_CHECK(
-        i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_ptr(size_t i)");
-    return m_outputs[i].get_tensor_ptr();
-}
-
-shared_ptr<descriptor::Tensor> Node::get_output_tensor_ptr() const
-{
-    if (get_output_size() != 1)
-    {
-        throw ngraph_error(
-            "get_output_tensor_ptr() must be called on a node with exactly one output.");
-    }
-    return m_outputs[0].get_tensor_ptr();
-}
-
 std::set<Input<Node>> Node::get_output_target_inputs(size_t i) const
 {
     std::set<Input<Node>> result;
@@ -688,15 +661,6 @@ const string& Node::get_output_tensor_name(size_t i) const
     return m_outputs[i].get_tensor().get_name();
 }
 
-descriptor::Tensor& Node::get_output_tensor() const
-{
-    if (get_output_size() != 1)
-    {
-        throw ngraph_error("get_output_tensor() must be called on a node with exactly one output.");
-    }
-    return get_output_tensor(0);
-}
-
 size_t Node::get_input_size() const
 {
     return m_inputs.size();

@@ -268,19 +268,6 @@ namespace ngraph
         /// \returns The stream os
         virtual std::ostream& write_description(std::ostream& os, uint32_t depth = 0) const;
 
-        std::deque<descriptor::Input>& get_inputs() NGRAPH_DEPRECATED("use inputs() instead")
-        {
-            return m_inputs;
-        }
-        const std::deque<descriptor::Input>& get_inputs() const
-            NGRAPH_DEPRECATED("use inputs() instead")
-        {
-            return m_inputs;
-        }
-        std::deque<descriptor::Output>& get_outputs() NGRAPH_DEPRECATED("use outputs() instead");
-        const std::deque<descriptor::Output>& get_outputs() const
-            NGRAPH_DEPRECATED("use outputs() instead");
-
         /// Get control dependencies registered on the node
         const std::vector<std::shared_ptr<Node>>& get_control_dependencies() const;
 
@@ -354,22 +341,6 @@ namespace ngraph
         /// Returns the tensor name for output i
        const std::string& get_output_tensor_name(size_t i) const;
 
-        /// Checks that there is exactly one output and returns its tensor.
-        descriptor::Tensor& get_output_tensor() const NGRAPH_DEPRECATED(
-            "use node->get_output_tensor(0) instead; insert a check that the node has only one "
-            "output, or update calling code not to assume only one output");
-
-        /// Returns the tensor of output i
-        // TODO: Investigate whether this really needs to be shared_ptr. If so, we'll need a
-        // replacement in Output.
-        std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr(size_t i) const
-            NGRAPH_DEPRECATED("use &node->output(i).get_tensor() instead");
-
-        /// Checks that there is exactly one output and returns its tensor.
-        std::shared_ptr<descriptor::Tensor> get_output_tensor_ptr() const NGRAPH_DEPRECATED(
-            "use &node->output(i).get_tensor() instead; insert a check that the node has only one "
-            "output, or update calling code not to assume only one output");
-
         std::set<Input<Node>> get_output_target_inputs(size_t i) const;
 
         /// Returns the number of inputs for the op

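The node.hpp hunk above removes the deprecated single-output tensor accessors. Following the deprecation notes they carried ("use &node->output(i).get_tensor() instead"), callers now index the output explicitly. A minimal sketch, assuming the same ngraph API as above; single_output_tensor is an illustrative helper, not part of this commit:

#include <memory>

#include <ngraph/ngraph.hpp>

// Before (now removed):
//   descriptor::Tensor& t = node->get_output_tensor();      // assumed exactly one output
//   auto t_ptr            = node->get_output_tensor_ptr();  // assumed exactly one output
// After: check the output count and name the index explicitly.
const ngraph::descriptor::Tensor& single_output_tensor(const std::shared_ptr<ngraph::Node>& node) {
    if (node->get_output_size() != 1) {
        throw ngraph::ngraph_error("expected a node with exactly one output");
    }
    return node->output(0).get_tensor();
}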
@@ -215,7 +215,7 @@ op::v1::ConvolutionBackpropData::ConvolutionBackpropData(const Output<Node>& dat
 bool op::v1::ConvolutionBackpropData::is_dynamic() const
 {
     bool is_dynamic = Node::is_dynamic();
-    if (get_inputs().size() == 3 && !is_dynamic)
+    if (inputs().size() == 3 && !is_dynamic)
     {
         return !is_type<op::Constant>(input_value(2).get_node());
     }
@@ -235,7 +235,7 @@ const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const
     {
         shape = PartialShape{vector<Dimension>(m_strides.size())};
     }
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
     if (is_output_shape_present)
     {
         if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
@@ -297,7 +297,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types()
     const PartialShape& filters_pshape = get_input_partial_shape(1);
     element::Type filters_et = get_input_element_type(1);
 
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
     PartialShape output_pshape = get_output_shape();
 
     element::Type result_et;

@@ -265,7 +265,7 @@ bool ngraph::op::v1::GroupConvolutionBackpropData::visit_attributes(AttributeVis
 bool op::v1::GroupConvolutionBackpropData::is_dynamic() const
 {
     bool is_dynamic = Node::is_dynamic();
-    if (get_inputs().size() == 3 && !is_dynamic)
+    if (inputs().size() == 3 && !is_dynamic)
     {
         return !is_type<op::Constant>(input_value(2).get_node());
     }
@@ -285,7 +285,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_
     {
         shape = PartialShape{vector<Dimension>(m_strides.size())};
     }
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
     if (is_output_shape_present)
     {
         if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
@@ -411,7 +411,7 @@ void op::v1::GroupConvolutionBackpropData::pre_validate_and_infer_types()
                              "spatial features.");
     }
 
-    bool is_output_shape_present = get_inputs().size() == 3;
+    bool is_output_shape_present = inputs().size() == 3;
     PartialShape output_pshape;
 
     // If output shape is provided, ignore current values for padding begin/end

@@ -116,7 +116,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                           "Expected a 3D tensor for the 'scores' input. Got: ",
                           scores_ps);
 
-    if (get_inputs().size() >= 3)
+    if (inputs().size() >= 3)
     {
         const auto max_boxes_ps = get_input_partial_shape(2);
         NODE_VALIDATION_CHECK(this,
@@ -125,7 +125,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                               max_boxes_ps);
     }
 
-    if (get_inputs().size() >= 4)
+    if (inputs().size() >= 4)
     {
         const auto iou_threshold_ps = get_input_partial_shape(3);
         NODE_VALIDATION_CHECK(this,
@@ -135,7 +135,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                               iou_threshold_ps);
     }
 
-    if (get_inputs().size() >= 5)
+    if (inputs().size() >= 5)
     {
         const auto score_threshold_ps = get_input_partial_shape(4);
         NODE_VALIDATION_CHECK(this,
@@ -315,7 +315,7 @@ void op::v3::NonMaxSuppression::validate()
                           "Expected a 3D tensor for the 'scores' input. Got: ",
                           scores_ps);
 
-    if (get_inputs().size() >= 3)
+    if (inputs().size() >= 3)
     {
         const auto max_boxes_ps = get_input_partial_shape(2);
         NODE_VALIDATION_CHECK(this,
@@ -324,7 +324,7 @@ void op::v3::NonMaxSuppression::validate()
                               max_boxes_ps);
     }
 
-    if (get_inputs().size() >= 4)
+    if (inputs().size() >= 4)
     {
         const auto iou_threshold_ps = get_input_partial_shape(3);
         NODE_VALIDATION_CHECK(this,
@@ -334,7 +334,7 @@ void op::v3::NonMaxSuppression::validate()
                               iou_threshold_ps);
     }
 
-    if (get_inputs().size() >= 5)
+    if (inputs().size() >= 5)
     {
         const auto score_threshold_ps = get_input_partial_shape(4);
         NODE_VALIDATION_CHECK(this,

@@ -68,31 +68,29 @@ bool pass::FusedOpDecomposition::run_on_node(shared_ptr<Node> node)
         size_t i = 0;
         for (auto output_node : subgraph_outputs)
         {
-            for (size_t j = 0; j < output_node->get_outputs().size(); j++, i++)
+            for (size_t j = 0; j < output_node->outputs().size(); j++, i++)
             {
-                set<descriptor::Input*> fop_users{begin(node->get_outputs().at(i).get_inputs()),
-                                                  end(node->get_outputs().at(i).get_inputs())};
+                std::set<Input<Node>> fop_users = node->outputs().at(i).get_target_inputs();
                 for (auto fop_user : fop_users)
                 {
-                    if (auto goe = as_type<op::GetOutputElement>(fop_user->get_raw_pointer_node()))
+                    if (auto goe = as_type<op::GetOutputElement>(fop_user.get_node()))
                     {
                         Output<Node> goe_output = goe->get_as_output();
                         if (goe_output.get_index() == i &&
                             !goe->output(0).get_target_inputs().empty())
                         {
                             // Replace GOE users
-                            set<descriptor::Input*> goe_users{
-                                begin(goe->get_outputs().at(0).get_inputs()),
-                                end(goe->get_outputs().at(0).get_inputs())};
+                            std::set<Input<Node>> goe_users =
+                                goe->outputs().at(0).get_target_inputs();
                             for (auto goe_user : goe_users)
                             {
-                                goe_user->replace_output(output_node->get_outputs().at(j));
+                                goe_user.replace_source_output(output_node->output(j));
                             }
                         }
                     }
                     else
                     {
-                        fop_user->replace_output(output_node->get_outputs().at(j));
+                        fop_user.replace_source_output(output_node->output(j));
                     }
                 }
             }
         }
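The FusedOpDecomposition hunk above is the one place in this commit where graph edges are rewired rather than only read; the replacement idiom is Input<Node>::replace_source_output. A minimal sketch of that idiom under the same API assumptions as above; redirect_consumers is an illustrative name, not part of the change:

#include <memory>
#include <set>

#include <ngraph/ngraph.hpp>

// Redirect every consumer of `from`'s output `j` so it reads from `to`'s output `j` instead.
void redirect_consumers(const std::shared_ptr<ngraph::Node>& from,
                        const std::shared_ptr<ngraph::Node>& to,
                        size_t j) {
    // Hold the consumer set in a local: replace_source_output() rewires edges while we iterate.
    std::set<ngraph::Input<ngraph::Node>> consumers = from->output(j).get_target_inputs();
    for (auto consumer : consumers) {
        consumer.replace_source_output(to->output(j));
    }
}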