diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp
index 3eca5027a15..41fc055b4c2 100644
--- a/src/plugins/intel_gpu/src/graph/program.cpp
+++ b/src/plugins/intel_gpu/src/graph/program.cpp
@@ -1138,8 +1138,14 @@ data_types program::get_inference_precision(const program_node& node) const {
     }
     std::vector<data_types> input_dts;
     for (auto& dep : node.get_dependencies()) {
-        input_dts.push_back(dep->get_output_layout().data_type);
+        if (dep->is_valid_output_layout())
+            input_dts.push_back(dep->get_output_layout().data_type);
     }
+
+    // Return f32 data_type as default inference precision if any layout is invalid
+    if (input_dts.size() != node.get_dependencies().size() || !node.is_valid_output_layout())
+        return data_types::f32;
+
     data_types output_dt = node.get_output_layout().data_type;
 
     assert(!input_dts.empty());
@@ -1205,13 +1211,19 @@ program::primitives_info program::get_current_stage_info() const {
             }
         }
 
+        // Initialize output_layout with dummy values and use them if layout is invalid
+        layout output_layout{ cldnn::data_types::f32, cldnn::format::any, {1, 1, 1, 1} };
+
+        if (p->is_valid_output_layout())
+            output_layout = p->get_output_layout();
+
         primitive_info pi(p->id(),
                           type_to_str(p->get_primitive()),
                           dependencies,
                           users,
                           fused,
-                          p->get_output_layout(),
-                          fmt_to_str(p->get_output_layout().format),
+                          output_layout,
+                          fmt_to_str(output_layout.format),
                           get_implementation_info(p->id()),
                           get_inference_precision(*p),
                           p->selected_impl ? p->selected_impl->is_cpu() : false,
@@ -1225,13 +1237,8 @@ program::primitives_info program::get_current_stage_info() const {
 
 void program::save_pass_info(std::string pass_name) {
     // TODO: Directory path here can be probably changed to some bool flag
-    if (!options.get()->directory_path.empty()) {
-        for (auto& node : this->get_processing_order()) {
-            if (!node->is_type<data>())
-                node->get_output_layout();
-        }
+    if (!options.get()->directory_path.empty())
         optimizer_passes_info.emplace_back(pass_name, get_current_stage_info());
-    }
 }
 
 void program::add_optimized_primitive_info(primitive_id optimized_primitive_id,
diff --git a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp
index 3859634e479..e8048d8f456 100644
--- a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp
+++ b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp
@@ -165,17 +165,33 @@ std::string get_load_program_name(build_options opts) {
 void dump_graph_init(std::ofstream& graph,
                      const program& program,
                      std::function<bool(program_node const&)> const& filter) {
-    const auto extr_oformat = [](program_node* ptr) {
-        std::string out = fmt_to_str(ptr->get_output_layout().format);
-
+    const std::string invalid_layout_msg = "(invalid layout)";
+    const auto extr_oformat = [&invalid_layout_msg](const program_node* ptr) {
         if (!ptr->is_valid_output_layout())
-            out += " (invalid)";
+            return invalid_layout_msg;
+
+        auto output_layout = ptr->get_output_layout();
+        std::string out = fmt_to_str(output_layout.format);
 
         return out;
     };
 
-    const auto dump_mem_info = [](program_node* ptr) {
+    const auto extr_odt = [&invalid_layout_msg](const program_node* ptr) {
+        if (!ptr->is_valid_output_layout())
+            return invalid_layout_msg;
+
+        auto output_layout = ptr->get_output_layout();
+        std::string out = dt_to_str(output_layout.data_type);
+
+        return out;
+    };
+
+    const auto dump_mem_info = [&invalid_layout_msg](const program_node* ptr) {
         std::string out = "size_info: ";
+        if (!ptr->is_valid_output_layout()) {
+            return out + invalid_layout_msg;
+        }
+
         auto out_layout = ptr->get_output_layout();
         auto tensor_str = out_layout.size.to_string();
         auto padding = out_layout.data_padding;
@@ -202,7 +218,7 @@ void dump_graph_init(std::ofstream& graph,
         std::string node_type_name = get_extr_type(node_type.name());
         graph << "    " << get_node_id(node) << "[label=\"" << node->id() << ":\n"
               << node_type_name << "\n out format: " + extr_oformat(node)
-              << "\n out data_type: " + dt_to_str(node->get_output_layout().data_type)
+              << "\n out data_type: " + extr_odt(node)
               << "\\nprocessing number: " << program.get_processing_order().get_processing_number(node)
               << "\\n color:" << (node->is_reusing_memory() ? std::to_string(node->get_reused_memory_color()) : "none")
               << (node->can_be_optimized() ? "\\n optimized out" : "");
diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp
index d8687afde9e..605d4e00ca0 100644
--- a/src/plugins/intel_gpu/src/graph/program_node.cpp
+++ b/src/plugins/intel_gpu/src/graph/program_node.cpp
@@ -214,7 +214,7 @@ layout program_node::get_output_layout(bool invalidate_users_if_changed) {
 
 layout program_node::get_output_layout() const {
     if (!valid_output_layout)
-        throw std::runtime_error("Output layout not calculated");
+        throw std::runtime_error("Output layout not calculated for " + id() + " node");
 
     return output_layout;
 }