diff --git a/src/plugins/intel_gpu/src/graph/one_hot.cpp b/src/plugins/intel_gpu/src/graph/one_hot.cpp index e16044d9f6c..ba0254b1227 100644 --- a/src/plugins/intel_gpu/src/graph/one_hot.cpp +++ b/src/plugins/intel_gpu/src/graph/one_hot.cpp @@ -104,6 +104,9 @@ std::string one_hot_inst::to_string(one_hot_node const& node) { one_hot_inst::typed_primitive_inst(network& network, one_hot_node const& node) : parent(network, node) { auto input_layout = node.input().get_output_layout(); + if (input_layout.is_dynamic()) + return; + const auto& input_sizes = input_layout.get_tensor(); const auto& output_sizes = argument.shape; diff --git a/src/plugins/intel_gpu/src/graph/reorder.cpp b/src/plugins/intel_gpu/src/graph/reorder.cpp index 6f1e9efeaab..511a6ccb281 100644 --- a/src/plugins/intel_gpu/src/graph/reorder.cpp +++ b/src/plugins/intel_gpu/src/graph/reorder.cpp @@ -200,7 +200,7 @@ std::string reorder_inst::to_string(reorder_node const& node) { } reorder_inst::typed_primitive_inst(network& network, reorder_node const& node) - : parent(network, node, !node.can_be_optimized() && !node.is_dynamic()) { + : parent(network, node, !node.can_be_optimized() && node.get_output_layout().is_static()) {
 if (node.can_be_optimized()) reuse_input(); diff --git a/src/plugins/intel_gpu/src/graph/reshape.cpp b/src/plugins/intel_gpu/src/graph/reshape.cpp index 9268ead8cd7..bdf68b3bcc6 100644 --- a/src/plugins/intel_gpu/src/graph/reshape.cpp +++ b/src/plugins/intel_gpu/src/graph/reshape.cpp @@ -60,7 +60,14 @@ std::vector reshape_inst::calc_output_layouts(reshape_node const& /*node // we return output_partial_shape taken from the original model intead of something like PartialShape::dynamic(rank) // as ngraph may refine output shape using interval arithmetic if ((memory_deps.empty() && prim->output_pattern.empty()) || input_layout.is_dynamic()) { - return { layout{prim->output_partial_shape, input_layout.data_type, format::adjust_to_rank(input_layout.format, prim->output_partial_shape.size())} }; + if (prim->output_partial_shape.size() > 0) { + auto fm = format::adjust_to_rank(input_layout.format, prim->output_partial_shape.size()); + return { layout{prim->output_partial_shape, input_layout.data_type, fm} }; + } else if (prim->output_shape != tensor()) { + return { layout{input_layout.data_type, input_layout.format, prim->output_shape} }; + } else { + OPENVINO_ASSERT(false, "There are no output pattern, predefined output partial shape, and output shape!"); + } } ShapeType pattern_shape = impl_param.input_layouts.size() == 2 ? 
impl_param.get_input_layout(1).get() @@ -78,16 +85,19 @@ std::vector reshape_inst::calc_output_layouts(reshape_node const& /*node case reshape::reshape_mode::base: { ov::op::v1::Reshape op; op.set_special_zero(prim->special_zero); + op.set_friendly_name(prim->id.c_str()); shape_infer(&op, input_shapes, output_shapes, const_data); break; } case reshape::reshape_mode::squeeze: { ov::op::v0::Squeeze op; + op.set_friendly_name(prim->id.c_str()); shape_infer(&op, input_shapes, output_shapes, const_data); break; } case reshape::reshape_mode::unsqueeze: { ov::op::v0::Unsqueeze op; + op.set_friendly_name(prim->id.c_str()); shape_infer(&op, input_shapes, output_shapes, const_data); break; } @@ -114,7 +124,7 @@ std::vector reshape_inst::calc_output_layouts(reshape_node const& /*node run_shape_infer(prim->mode); } - return { layout{output_shapes[0], input_layout.data_type, format::adjust_to_rank(input_layout.format, output_shapes[0].size())} }; + return { layout {output_shapes[0], input_layout.data_type, format::adjust_to_rank(input_layout.format, output_shapes[0].size())} }; } template std::vector reshape_inst::calc_output_layouts(reshape_node const& node, const kernel_impl_params& impl_param); diff --git a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp index 458714813e3..277dbe1a4a9 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp @@ -52,10 +52,7 @@ static void CreateOneHotOp(Program& p, const std::shared_ptrcast_vector()[0]; auto out_pshape = op->get_output_partial_shape(0); - if (out_pshape.is_dynamic()) { - IE_THROW() << "OneHot doesn't support dynamic shapes yet"; - } - auto out_tensor = tensor_from_dims(out_pshape.to_shape()); + cldnn::tensor out_tensor = out_pshape.is_static() ? 
tensor_from_dims(out_pshape.to_shape()) : cldnn::tensor{}; auto oneHotPrim = cldnn::one_hot(layerName, inputPrimitives[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp index 0823f51cca0..8bbab32fef9 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp @@ -227,9 +227,14 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr output_pattern(output_shape.size()); + auto out_p = output_pattern.begin(); + for (auto s = output_shape.begin(); s != output_shape.end() && out_p != output_pattern.end(); s++, out_p++) { + *out_p = *s; + } + auto reshapeOutName = op->get_friendly_name() + "/Crop"; - auto reshapePrim = cldnn::reshape(reshapeOutName, layerName, targetShape); + auto reshapePrim = cldnn::reshape(reshapeOutName, layerName, false, output_pattern, output_pshape); p.add_primitive(*op, reshapePrim); last_layer_primitive = reshapeOutName; }