[GPU] Update reshape factory to handle dynamic cases (#13027)

This commit is contained in:
Vladimir Paramuzov
2022-09-15 11:02:02 +04:00
committed by GitHub
parent 57753f7938
commit a27f6196d5
2 changed files with 60 additions and 31 deletions

View File

@@ -175,7 +175,8 @@ void primitive_inst::update_shape() {
auto memory_deps = _node.get_const_memory_deps();
std::vector<event::ptr> dependencies_events;
for (auto& i : _node.get_shape_infer_dependencies()) {
if (memory_deps.count(i) > 0) {
// Some primitives may have flexible count of deps (e.g. reshape), thus allow skipping some deps
if (memory_deps.count(i) > 0 || i >= _node.get_dependencies().size()) {
continue;
}
auto& dep = _node.get_dependency(i);

View File

@@ -15,7 +15,7 @@
namespace ov {
namespace intel_gpu {
static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reshape::reshape_mode mode) {
static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node>& op, cldnn::reshape::reshape_mode mode, bool special_zero = false) {
validate_inputs_count(op, {1, 2});
auto inputPrimitives = p.GetInputPrimitiveIDs(op);
std::string layerName = layer_type_name_ID(op);
@@ -23,41 +23,69 @@ static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node
auto input_pshape = op->get_input_partial_shape(0);
auto output_pshape = op->get_output_partial_shape(0);
OPENVINO_ASSERT(input_pshape.is_static() && output_pshape.is_static(), "Dynamic shapes are not supported for Reshape operation yet");
auto outTensor = tensor_from_dims(output_pshape.to_shape());
// if we convert from or to 5D/6D, additional reorder also required to change format
cldnn::primitive_id reshapeInputId = inputPrimitives[0];
if (input_pshape.size() != output_pshape.size()) {
cldnn::primitive_id reorderId = "reorder:" + op->get_friendly_name() + "_reorder";
cldnn::format outputFormat = cldnn::format::bfyx;
switch (output_pshape.size()) {
case 5: outputFormat = cldnn::format::bfzyx; break;
case 6: outputFormat = cldnn::format::bfwzyx; break;
default: break;
if (p.use_new_shape_infer() || op->is_dynamic()) {
std::shared_ptr<cldnn::reshape> reshape_prim = nullptr;
auto second_const_input = op->get_input_size() == 2 ? std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1)) : nullptr;
std::vector<int64_t> output_pattern = {};
if (second_const_input != nullptr) {
output_pattern = second_const_input->cast_vector<int64_t>();
}
cldnn::layout outputLayout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), outputFormat, outTensor);
p.add_primitive(*op, cldnn::reorder(reorderId,
reshapeInputId,
outputLayout,
std::vector<float>(),
cldnn::reorder_mean_mode::subtract));
reshapeInputId = reorderId;
// If second input is absent (it's optional in Squeeze op) or it's constant, create reshape with single input and compile time out pattern
if (op->get_input_size() == 1 || second_const_input != nullptr) {
reshape_prim = std::make_shared<cldnn::reshape>(layerName,
inputPrimitives[0],
special_zero,
output_pattern,
output_pshape,
mode);
} else {
reshape_prim = std::make_shared<cldnn::reshape>(layerName,
inputPrimitives[0],
inputPrimitives[1],
special_zero,
output_pshape,
mode);
}
p.add_primitive(*op, reshape_prim);
} else {
OPENVINO_ASSERT(input_pshape.is_static() && output_pshape.is_static(), "Dynamic shapes are not supported for Reshape operation yet");
auto outTensor = tensor_from_dims(output_pshape.to_shape());
// if we convert from or to 5D/6D, additional reorder also required to change format
cldnn::primitive_id reshapeInputId = inputPrimitives[0];
if (input_pshape.size() != output_pshape.size()) {
cldnn::primitive_id reorderId = "reorder:" + op->get_friendly_name() + "_reorder";
cldnn::format outputFormat = cldnn::format::bfyx;
switch (output_pshape.size()) {
case 5: outputFormat = cldnn::format::bfzyx; break;
case 6: outputFormat = cldnn::format::bfwzyx; break;
default: break;
}
cldnn::layout outputLayout(cldnn::element_type_to_data_type(op->get_output_element_type(0)), outputFormat, outTensor);
p.add_primitive(*op, cldnn::reorder(reorderId,
reshapeInputId,
outputLayout,
std::vector<float>(),
cldnn::reorder_mean_mode::subtract));
reshapeInputId = reorderId;
}
auto reshapePrim = cldnn::reshape(layerName,
reshapeInputId,
outTensor,
mode);
p.add_primitive(*op, reshapePrim);
}
auto reshapePrim = cldnn::reshape(layerName,
reshapeInputId,
outTensor,
mode);
p.add_primitive(*op, reshapePrim);
}
// Creator for ngraph v1::Reshape: delegates to the common helper, forwarding the
// op's special_zero attribute so zeros in the pattern are interpreted correctly.
static void CreateReshapeOp(Program& p, const std::shared_ptr<ngraph::op::v1::Reshape>& op) {
    CreateCommonReshapeOp(p, op, cldnn::reshape::reshape_mode::base, op->get_special_zero());
}
static void CreateSqueezeOp(Program& p, const std::shared_ptr<ngraph::op::v0::Squeeze>& op) {