diff --git a/ngraph/core/src/validation_util.cpp b/ngraph/core/src/validation_util.cpp
index 7b37ba7f042..ba78f9ee3aa 100644
--- a/ngraph/core/src/validation_util.cpp
+++ b/ngraph/core/src/validation_util.cpp
@@ -1225,35 +1225,6 @@ bool could_propagate(const Output<Node>& output, std::vector<Node*>& order)
     return status;
 }
 
-void propagate_rt_info(Node* node, const Output<Node>& final_port)
-{
-    auto node_outputs = node->outputs();
-    bool same_outputs =
-        std::all_of(node_outputs.begin(), node_outputs.end(), [](const Output<Node>& output) {
-            return output.get_tensor().has_and_set_bound();
-        });
-    if (same_outputs && op::is_constant(node)) // constant should not propagate it's rt_info
-    {
-        std::unordered_set<Node*> stop_nodes;
-        for (const auto& in : final_port.get_target_inputs())
-            stop_nodes.insert(in.get_node());
-
-        auto curr_node = node->shared_from_this();
-        for (const auto& output : node_outputs)
-        {
-            if (output == final_port)
-                continue;
-            for (auto& in : output.get_target_inputs())
-            {
-                if (stop_nodes.count(in.get_node()))
-                    continue;
-                auto consumer = in.get_node()->shared_from_this();
-                copy_runtime_info({curr_node, consumer}, consumer);
-            }
-        }
-    }
-}
-
 HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper)
 {
     // bound is already set in the tensor
@@ -1291,7 +1262,6 @@ HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper)
             for (const auto& input : input_values)
                 if (input.get_target_inputs().size() == 1)
                     input.get_tensor().invalidate_values();
-            propagate_rt_info(node, output);
         }
         else
         {