[GPU] Use partial shape in some ops factories (#12353)

Author: Vladimir Paramuzov
Date: 2022-08-10 19:51:55 +04:00 (committed via GitHub)
Commit: 52b57fa860 (parent: 91ce7406ad)
17 changed files with 87 additions and 102 deletions
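
Note: the pattern repeated across these files is the same throughout: shape queries move from get_input_shape() / get_output_shape(), which require fully static shapes, to get_input_partial_shape() / get_output_partial_shape(), and factories that still need concrete dimensions guard the conversion with an explicit is_static() check. A minimal standalone sketch of that guard pattern, using a simplified stand-in for ov::PartialShape (names here are illustrative, not the OpenVINO API):

#include <cstddef>
#include <stdexcept>
#include <vector>

// Simplified stand-in for ov::PartialShape: -1 marks a dynamic dimension.
struct PartialShapeSketch {
    std::vector<long> dims;

    bool is_static() const {
        for (long d : dims)
            if (d < 0) return false;
        return true;
    }

    std::size_t size() const { return dims.size(); }  // rank

    // Mirrors PartialShape::to_shape(): only valid for fully static shapes.
    std::vector<std::size_t> to_shape() const {
        if (!is_static())
            throw std::logic_error("to_shape() called on a dynamic shape");
        return std::vector<std::size_t>(dims.begin(), dims.end());
    }
};

// The guard pattern the commit applies in ops that still need static dims.
std::vector<std::size_t> require_static(const PartialShapeSketch& pshape) {
    if (!pshape.is_static())
        throw std::runtime_error("Dynamic shapes are not supported for this operation yet");
    return pshape.to_shape();
}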

View File

@@ -21,7 +21,6 @@ static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cum
     auto exclusive = op->is_exclusive();
     auto reverse = op->is_reverse();
-    size_t rank = op->get_input_shape(0).size();
     int64_t axis = 0;
     if (op->get_input_size() == 2) {
         auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
@@ -30,10 +29,7 @@ static void CreateCumSumOp(Program& p, const std::shared_ptr<ngraph::op::v0::Cum
         }
         axis = axes_constant->cast_vector<int64_t>()[0];
     }
-    if (axis < 0)
-        axis += rank;
-    if (axis < 0 || axis >= static_cast<int64_t>(rank))
-        IE_THROW() << "CumSum axis is not correspond to number of dimensions";
+    axis = ov::normalize_axis(op.get(), axis, op->get_input_partial_shape(0).rank());
     auto primitive = cldnn::cum_sum(layerName,
                                     inputPrimitives[0],
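
For axis handling, ov::normalize_axis() (from OpenVINO's validation utilities) collapses the four removed lines into one call: it wraps a negative axis by the rank and rejects anything out of range with a node-attributed error. A behavioral sketch of that contract, not the library implementation:

#include <cstdint>
#include <stdexcept>
#include <string>

// Sketch of the contract of ov::normalize_axis(node, axis, rank):
// map axis from [-rank, rank) onto [0, rank), rejecting anything outside.
int64_t normalize_axis_sketch(int64_t axis, int64_t rank) {
    if (axis < -rank || axis >= rank)
        throw std::out_of_range("axis " + std::to_string(axis) +
                                " is out of range for rank " + std::to_string(rank));
    return axis < 0 ? axis + rank : axis;
}

// normalize_axis_sketch(-1, 4) == 3; normalize_axis_sketch(2, 4) == 2;
// normalize_axis_sketch(4, 4) throws, as the old IE_THROW branch did.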

View File

@@ -18,8 +18,8 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::G
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    int32_t input_rank = static_cast<int32_t>(op->get_input_shape(0).size());
-    int32_t indices_rank = static_cast<int32_t>(op->get_input_shape(1).size());
+    auto input_rank = op->get_input_partial_shape(0).size();
+    auto indices_rank = op->get_input_partial_shape(1).size();
     auto batch_dims = op->get_batch_dims();
@@ -43,8 +43,8 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v8::G
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    int32_t input_rank = static_cast<int32_t>(op->get_input_shape(0).size());
-    int32_t indices_rank = static_cast<int32_t>(op->get_input_shape(1).size());
+    auto input_rank = op->get_input_partial_shape(0).size();
+    auto indices_rank = op->get_input_partial_shape(1).size();
     auto batch_dims = op->get_batch_dims();

View File

@@ -49,10 +49,8 @@ static void CreateMVNOp(Program& p, const std::shared_ptr<ngraph::op::v6::MVN>&
     if (!inConst)
         IE_THROW() << "Unsupported parameter nodes type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
-    auto& mvnShape = op->get_output_shape(0);
-    std::vector<int32_t> axes = inConst->cast_vector<int32_t>();
-    for (int32_t& axis : axes)
-        axis = axis < 0 ? axis + mvnShape.size() : axis;
+    std::vector<int64_t> axes = inConst->cast_vector<int64_t>();
+    ov::normalize_axes(op.get(), op->get_output_partial_shape(0).size(), axes);
     const size_t chanelAxis = 1;
     bool across_channels = std::find(axes.begin(), axes.end(), chanelAxis) != axes.end();
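
MVN uses the vector form: ov::normalize_axes() rewrites every entry in place (the diff calls it without capturing a return value), which is why the hand-written wrap loop disappears. A sketch of that variant under the same assumptions as the single-axis one above:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch of the vector variant: normalize each axis in place against rank.
void normalize_axes_sketch(int64_t rank, std::vector<int64_t>& axes) {
    for (int64_t& axis : axes) {
        if (axis < -rank || axis >= rank)
            throw std::out_of_range("axis out of range");
        if (axis < 0)
            axis += rank;
    }
}
// After normalization the code can search for the channel axis directly,
// e.g. std::find(axes.begin(), axes.end(), 1) for across_channels.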

View File

@@ -26,11 +26,16 @@ static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::
     float offset = attrs.offset;
     bool clip = attrs.clip;
-    auto inp_dims = op->get_input_shape(0);
-    auto img_dims = op->get_input_shape(1);
-    int img_w = static_cast<int>(img_dims.back());
-    int img_h = static_cast<int>(img_dims.at(img_dims.size() - 2));
+    auto input_pshape = op->get_input_partial_shape(0);
+    auto img_pshape = op->get_input_partial_shape(1);
+    OPENVINO_ASSERT(input_pshape.is_static() && img_pshape.is_static(), "Dynamic shapes are not supported for PriorBoxClustered operation yet");
+    auto input_shape = input_pshape.to_shape();
+    auto img_shape = img_pshape.to_shape();
+    int img_w = static_cast<int>(img_shape.back());
+    int img_h = static_cast<int>(img_shape.at(img_shape.size() - 2));
     cldnn::tensor img_size = (cldnn::tensor) cldnn::spatial(TensorValue(img_w), TensorValue(img_h));
     auto step_w = attrs.step_widths;
@@ -41,8 +46,8 @@ static void CreatePriorBoxClusteredOp(Program& p, const std::shared_ptr<ngraph::
     }
     if (step_w == 0.0f && step_h == 0.0f) {
-        step_w = static_cast<float>(img_w) / inp_dims.back();
-        step_h = static_cast<float>(img_h) / inp_dims.at(img_dims.size() - 2);
+        step_w = static_cast<float>(img_w) / input_shape.back();
+        step_h = static_cast<float>(img_h) / input_shape.at(img_shape.size() - 2);
     }
     auto priorBoxPrim = cldnn::prior_box(layerName,
@@ -84,10 +89,13 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
     auto step_w = attrs.step;
     auto step_h = attrs.step;
-    auto img_dims = op->get_input_shape(1);
-    auto wdim = img_dims.back();
-    auto hdim = img_dims.at(img_dims.size()-2);
+    auto img_pshape = op->get_input_partial_shape(1);
+    OPENVINO_ASSERT(img_pshape.is_static(), "Dynamic shapes are not supported for PriorBox operation yet");
+    auto img_shape = img_pshape.to_shape();
+    auto wdim = img_shape.back();
+    auto hdim = img_shape.at(img_shape.size()-2);
     cldnn::tensor img_size = (cldnn::tensor) cldnn::spatial(TensorValue(wdim), TensorValue(hdim));
     auto priorBoxPrim = cldnn::prior_box(layerName,
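
Both PriorBox paths read the image size from the tail of the shape: for layouts whose two trailing dimensions are spatial (e.g. [N, C, H, W] — a layout assumption of the factory, not stated in the diff), width is the last element and height the one before it, which is exactly what img_shape.back() and img_shape.at(img_shape.size() - 2) extract. In isolation:

#include <cstddef>
#include <vector>

// For a shape whose two trailing dimensions are spatial (e.g. [N, C, H, W]),
// width is the last element and height the one before it.
struct Spatial { std::size_t height; std::size_t width; };

Spatial spatial_tail(const std::vector<std::size_t>& shape) {
    return { shape.at(shape.size() - 2), shape.back() };
}
// spatial_tail({1, 3, 720, 1280}) yields {720, 1280}.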

View File

@@ -28,7 +28,7 @@ static void CreateReduceOp(Program& p, const std::shared_ptr<ngraph::Node>& op,
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    int64_t rank = op->get_input_shape(0).size();
+    int64_t rank = op->get_input_partial_shape(0).size();
     auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
     if (!axes_constant) {

View File

@@ -20,17 +20,20 @@ static void CreateCommonReshapeOp(Program& p, const std::shared_ptr<ngraph::Node
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto inDims = op->get_input_shape(0);
-    auto outDims = op->get_output_shape(0);
-    auto outTensor = tensor_from_dims(outDims);
+    auto input_pshape = op->get_input_partial_shape(0);
+    auto output_pshape = op->get_output_partial_shape(0);
+    OPENVINO_ASSERT(input_pshape.is_static() && output_pshape.is_static(), "Dynamic shapes are not supported for Reshape operation yet");
+    auto outTensor = tensor_from_dims(output_pshape.to_shape());
     // if we convert from or to 5D/6D, additional reorder also required to change format
     cldnn::primitive_id reshapeInputId = inputPrimitives[0];
-    if (inDims.size() != outDims.size()) {
+    if (input_pshape.size() != output_pshape.size()) {
         cldnn::primitive_id reorderId = "reorder:" + op->get_friendly_name() + "_reorder";
         cldnn::format outputFormat = cldnn::format::bfyx;
-        switch (outDims.size()) {
+        switch (output_pshape.size()) {
             case 5: outputFormat = cldnn::format::bfzyx; break;
             case 6: outputFormat = cldnn::format::bfwzyx; break;
             default: break;
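
The reorder branch exists because cldnn encodes rank in the format: 4D tensors default to bfyx, 5D to bfzyx, 6D to bfwzyx, so a reshape that crosses those boundaries needs an explicit reorder. A sketch of that rank-to-format mapping (the enum here is an illustrative stand-in; the real values come from cldnn::format):

#include <cstddef>

// Illustrative stand-in for cldnn's rank-dependent default formats.
enum class FormatSketch { bfyx, bfzyx, bfwzyx };

FormatSketch default_format_for_rank(std::size_t rank) {
    switch (rank) {
        case 5: return FormatSketch::bfzyx;   // 5D
        case 6: return FormatSketch::bfwzyx;  // 6D
        default: return FormatSketch::bfyx;   // 4D and below
    }
}

// A reorder is required exactly when input and output ranks map to
// different default formats:
bool needs_reorder(std::size_t in_rank, std::size_t out_rank) {
    return default_format_for_rank(in_rank) != default_format_for_rank(out_rank);
}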

View File

@@ -20,7 +20,9 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
     const auto inputs = p.GetInputPrimitiveIDs(op);
     const auto layer_name = layer_type_name_ID(op);
     const auto& op_friendly_name = op->get_friendly_name();
-    const auto& input_shape = op->get_input_shape(0);
+    const auto& input_pshape = op->get_input_partial_shape(0);
+    OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for Roll operation yet");
+    const auto& input_shape = input_pshape.to_shape();
     const uint8_t rank = input_shape.size();
     const auto format = cldnn::format::get_default_format(rank);
     const auto default_rank = format.dimension();

View File

@@ -18,16 +18,11 @@ static void CreateScatterElementsUpdateOp(Program& p, const std::shared_ptr<ngra
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    size_t rank = op->get_input_shape(0).size();
     auto axes_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(3));
     if (!axes_constant) {
         OPENVINO_ASSERT("Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");
     }
-    int64_t axis = axes_constant->cast_vector<int64_t>()[0];
-    if (axis < 0)
-        axis += rank;
-    if (axis < 0 || axis >= static_cast<int64_t>(rank))
-        OPENVINO_ASSERT("ScatterElementsUpdate axis is not correspond to number of dimensions");
+    int64_t axis = ov::normalize_axis(op.get(), axes_constant->cast_vector<int64_t>()[0], op->get_input_partial_shape(0).rank());
     auto primitive = cldnn::scatter_elements_update(layerName,
                                                     inputPrimitives[0],

View File

@@ -17,22 +17,7 @@ static void CreateScatterNDUpdateOp(Program& p, const std::shared_ptr<ngraph::op
     p.ValidateInputs(op, {3});
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto indices_rank = op->get_input_shape(1).size();
-    auto indices_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
-    if (indices_constant) {
-        auto indices = indices_constant->cast_vector<int32_t>();
-        auto indices_last_dim = op->get_input_shape(1)[indices_rank - 1];
-        auto data_shape = op->get_input_shape(0);
-        bool valid = true;
-        for (size_t i = 0; i < indices.size(); ++i) {
-            if (indices[i] >= static_cast<int>(data_shape[i % indices_last_dim]))
-                valid = false;
-        }
-        if (!valid)
-            IE_THROW() << "Invaild indices values";
-    }
+    auto indices_rank = op->get_input_partial_shape(1).size();
     auto primitive = cldnn::scatter_nd_update(layerName,
                                               inputPrimitives[0],

View File

@@ -19,8 +19,8 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto outDims = op->get_output_shape(0);
-    auto outDimsN = outDims.size();
+    auto output_pshape = op->get_output_partial_shape(0);
+    auto output_rank = output_pshape.size();
     auto broadcast_type = op->get_auto_broadcast();
@@ -32,13 +32,15 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
     if (broadcast_type.m_type == ngraph::op::AutoBroadcastType::NUMPY) {
         // Preprocess inputs
         for (size_t i = 0; i < inputPrimitives.size(); ++i) {
-            auto inputDims = op->get_input_shape(i);
-            auto inputDimsN = inputDims.size();
+            auto input_pshape = op->get_input_partial_shape(i);
+            OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for v1::Select with NUMPY mode yet");
+            auto input_shape = input_pshape.to_shape();
+            auto input_rank = input_shape.size();
             // Add reorder if changing number of dimensions requires changing format
-            auto targetFormat = cldnn::format::get_default_format(outDimsN);
-            if (targetFormat.value != cldnn::format::get_default_format(inputDimsN).value) {
+            auto targetFormat = cldnn::format::get_default_format(output_rank);
+            if (targetFormat.value != cldnn::format::get_default_format(input_rank).value) {
                 auto reorderName = layerName + "_cldnn_in" + std::to_string(i) + "_reorder";
                 auto targetDatatype = DataTypeFromPrecision(op->get_input_element_type(i));
                 auto reorderPrim = cldnn::reorder(reorderName,
@@ -56,13 +58,13 @@ static void CreateSelectOp(Program& p, const std::shared_ptr<ngraph::op::v1::Sel
             }
             // Reshape input if they differ or select specific shape matches default one
-            if (inputDimsN != outDimsN || inputDimsN < 4) {
+            if (input_rank != output_rank || input_rank < 4) {
                 auto reshapeName = layerName + "_cldnn_in" + std::to_string(i) + "_reshape";
                 // Extend input dimensions to the same size as output dimensions by prepending ones
-                inputDims.insert(inputDims.begin(), outDimsN - inputDimsN, 1ul);
-                auto targetShape = tensor_from_dims(inputDims);
+                input_shape.insert(input_shape.begin(), output_rank - input_rank, 1ul);
+                auto targetShape = tensor_from_dims(input_shape);
                 auto reshapePrim = cldnn::reshape(reshapeName, inputPrimitives[i], targetShape, op->get_friendly_name());
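
The NUMPY branch aligns every input to the output rank before Select runs: shapes are right-aligned and padded with leading ones, which is what the insert call above does. A standalone sketch of that alignment:

#include <cstddef>
#include <vector>

// Numpy-style rank alignment: prepend ones until the shape reaches
// the target rank. Mirrors the input_shape.insert(...) call above.
std::vector<std::size_t> align_rank(std::vector<std::size_t> shape, std::size_t target_rank) {
    if (shape.size() < target_rank)
        shape.insert(shape.begin(), target_rank - shape.size(), 1ul);
    return shape;
}
// align_rank({3, 4}, 4) -> {1, 1, 3, 4}, ready for elementwise broadcast.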

View File

@@ -17,22 +17,8 @@ static void CreateShuffleChannelsOp(Program& p, const std::shared_ptr<ngraph::op
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto in_rank = op->get_input_shape(0).size();
     int32_t group = op->get_group();
-    int32_t axis = op->get_axis();
-    if (axis < 0)
-        axis += in_rank;
-    if (axis < 0 || axis >= static_cast<int32_t>(in_rank))
-        IE_THROW() << "Incorrect axis value! Actual axis is" + std::to_string(group);
-    if (group < 1)
-        IE_THROW() << "Invalid group size value (should equal at least one). Actual block size is" << std::to_string(group);
-    if (op->get_input_shape(0)[axis] % group != 0)
-        IE_THROW() << "Group parameter must evenly divide the channel dimension. Actual group size is " << std::to_string(axis);
+    int64_t axis = ov::normalize_axis(op.get(), op->get_axis(), op->get_input_partial_shape(0).rank());
     auto shuffleChannelsPrim = cldnn::shuffle_channels(layerName,
                                                        inputPrimitives[0],

View File

@@ -31,12 +31,7 @@ static void CreateSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v8::So
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    int64_t axis = op->get_axis();
-    size_t rank = op->get_input_shape(0).size();
-    if (axis < 0)
-        axis += rank;
-    if (axis < 0 || axis >= static_cast<int64_t>(rank))
-        IE_THROW() << "Softmax axis is not correspond to number of dimensions";
+    int64_t axis = ov::normalize_axis(op.get(), op->get_axis(), op->get_input_partial_shape(0).rank());
     auto softmaxPrim = cldnn::softmax(layerName,
                                       inputPrimitives[0],
@@ -52,9 +47,7 @@ static void CreateLogSoftmaxOp(Program& p, const std::shared_ptr<ngraph::op::v5:
     std::string layerName = layer_type_name_ID(op);
     std::string layerNameSoftmax = layer_type_name_ID(op) + "_softmax";
-    auto axis = op->get_axis();
-    if (axis < 0)
-        axis += op->get_input_shape(0).size();
+    int64_t axis = ov::normalize_axis(op.get(), op->get_axis(), op->get_input_partial_shape(0).rank());
     auto softmaxPrim = cldnn::softmax(layerNameSoftmax,
                                       inputPrimitives[0],

View File

@@ -18,7 +18,7 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto rank = op->get_input_shape(0).size();
+    auto rank = op->get_input_partial_shape(0).size();
     auto format = cldnn::format::get_default_format(rank);
     std::vector<cldnn::tensor> inputs;
@@ -36,7 +36,10 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
         }
         inputs.emplace_back(format, sizes, default_size);
     }
-    auto out_size = tensor_from_dims(op->get_output_shape(0));
+    auto output_pshape = op->get_output_partial_shape(0);
+    // In case of dynamic shapes pass dummy shape value to space_to_batch primitive
+    // To be removed once we enable internal shape infer for all operations
+    auto out_size = output_pshape.is_static() ? tensor_from_dims(output_pshape.to_shape()) : cldnn::tensor();
     auto batchToSpacePrim = cldnn::space_to_batch(layerName,
                                                   inputPrimitives[0],  // input
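
The "dummy shape" fallback above is the transitional pattern for dynamic outputs: hand the primitive a real size when the output shape is static, and a default-constructed placeholder otherwise, leaving the actual size to future internal shape inference. The pattern in isolation (a sketch with std::optional standing in for the static/dynamic distinction; not the cldnn API):

#include <cstddef>
#include <optional>
#include <vector>

using ShapeSketch = std::vector<std::size_t>;

// Static-or-sentinel: a concrete shape when known, an empty placeholder
// otherwise. The plugin uses a default-constructed cldnn::tensor
// (or ov::Shape{}) in the same role.
ShapeSketch shape_or_dummy(const std::optional<ShapeSketch>& maybe_static) {
    return maybe_static.value_or(ShapeSketch{});
}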

View File

@@ -17,8 +17,12 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
-    auto inputDims = op->get_input_shape(0);
-    InferenceEngine::SizeVector startOffset(inputDims.size());
+    auto input_pshape = op->get_input_partial_shape(0);
+    OPENVINO_ASSERT(input_pshape.is_static(),
+                    "Dynamic shapes are not supported yet for v1::Split and v1::VariadicSplit operations");
+    auto input_shape = input_pshape.to_shape();
+    InferenceEngine::SizeVector start_offset(input_shape.size());
     bool is_single_out_split = op->get_output_size() == 1;
@@ -26,12 +30,12 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
         std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
         const auto outLayerDims = op->get_output_shape(i);
         NGRAPH_SUPPRESS_DEPRECATED_START
-        if (outLayerDims.size() != startOffset.size()) {
+        if (outLayerDims.size() != start_offset.size()) {
             IE_THROW() << "Invalid dimesions in split layer: " << op->get_friendly_name()
                        << " output: " << op->get_output_tensor_name(i);
         }
-        for (size_t i = 0; i < inputDims.size(); i++) {
-            if ((outLayerDims[i] + startOffset[i]) > inputDims[i]) {
+        for (size_t i = 0; i < input_shape.size(); i++) {
+            if ((outLayerDims[i] + start_offset[i]) > input_shape[i]) {
                 IE_THROW() << "Invalid dimesions in split layer: " << op->get_friendly_name()
                            << " output: " << op->get_output_tensor_name(i);
             }
@@ -39,7 +43,7 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
         NGRAPH_SUPPRESS_DEPRECATED_END
         auto outTensor = tensor_from_dims(outLayerDims, 1);
-        auto offsetTensor = tensor_from_dims(startOffset, 0);
+        auto offsetTensor = tensor_from_dims(start_offset, 0);
         auto cropPrim = cldnn::crop(outLayerName, inputPrimitives[0], outTensor, offsetTensor, op->get_friendly_name());
         p.primitiveIDs[outLayerName] = outLayerName;
@@ -48,9 +52,9 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
         p.profilingIDs.push_back(outLayerName);
         p.InitProfileInfo(outLayerName, "Crop");
-        for (size_t i = 0; i < inputDims.size(); i++) {
-            if (outLayerDims[i] != inputDims[i]) {
-                startOffset[i] += outLayerDims[i];
+        for (size_t i = 0; i < input_shape.size(); i++) {
+            if (outLayerDims[i] != input_shape[i]) {
+                start_offset[i] += outLayerDims[i];
             }
         }
     }
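
The crop loop works because offsets accumulate only along the dimension being split: each output advances start_offset by its extent in the axes where it differs from the input. A sketch of how those per-output offsets come out for a concrete split:

#include <cstddef>
#include <vector>

using Dims = std::vector<std::size_t>;

// Given per-output shapes of a split, compute each output's start offset
// inside the input, advancing only along dimensions that were split.
std::vector<Dims> split_offsets(const Dims& input, const std::vector<Dims>& outputs) {
    Dims offset(input.size(), 0);
    std::vector<Dims> result;
    for (const Dims& out : outputs) {
        result.push_back(offset);
        for (std::size_t d = 0; d < input.size(); ++d)
            if (out[d] != input[d])       // split dimension
                offset[d] += out[d];
    }
    return result;
}
// Splitting {1, 6, 4, 4} into three {1, 2, 4, 4} outputs yields offsets
// {0,0,0,0}, {0,2,0,0}, {0,4,0,0} along the channel axis.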

View File

@@ -20,6 +20,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
     auto inputPrimitives = p.GetInputPrimitiveIDs(op);
     std::string layerName = layer_type_name_ID(op);
+    auto output_pshape = op->get_output_partial_shape(0);
     do {
         auto data_output = op->input_value(0);
         auto begin_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->input_value(1).get_node_shared_ptr());
@@ -32,8 +33,13 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
             break;
         }
-        auto input_shape = op->get_input_shape(0);
-        auto output_shape = op->get_output_shape(0);
+        auto input_pshape = op->get_input_partial_shape(0);
+        if (input_pshape.is_dynamic() || output_pshape.is_dynamic())
+            return;
+        auto input_shape = input_pshape.to_shape();
+        auto output_shape = output_pshape.to_shape();
         auto begin = begin_node->cast_vector<int64_t>();
         auto end = end_node->cast_vector<int64_t>();
@@ -233,6 +239,10 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
         return;
     } while (false);
+    // In case of dynamic shapes pass dummy shape value to strided_slice primitive
+    // To be removed once we enable internal shape infer for all operations
+    auto output_shape = output_pshape.is_static() ? output_pshape.to_shape() : ov::Shape{};
     auto stridedSlicePrim = cldnn::strided_slice(layerName,
                                                  inputPrimitives[0],
                                                  inputPrimitives[1],
@@ -242,7 +252,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
                                                  op->get_end_mask(),
                                                  op->get_new_axis_mask(),
                                                  op->get_shrink_axis_mask(),
-                                                 op->get_output_partial_shape(0).to_shape(),
+                                                 output_shape,
                                                  op->get_friendly_name());
     p.AddPrimitive(stridedSlicePrim);
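
The do { ... } while (false) wrapper is what lets the constant-folding path bail out with break at any failed precondition and fall through to the generic strided_slice primitive below; the dynamic-shape checks added here slot into the same structure. The idiom in isolation:

#include <iostream>

// do { ... } while (false): a single-pass block whose breaks act as
// structured early exits, so a chain of preconditions can abandon a
// fast path and fall through to a generic fallback.
void process(bool precondition_a, bool precondition_b) {
    do {
        if (!precondition_a)
            break;  // give up on the specialized path
        if (!precondition_b)
            break;
        std::cout << "specialized path\n";
        return;     // fully handled, skip the fallback
    } while (false);
    std::cout << "generic fallback path\n";
}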

View File

@@ -59,7 +59,7 @@ static void CreateTransposeOp(Program& p, const std::shared_ptr<ngraph::op::v1::
         return;
     }
-    int rank = std::max(4, static_cast<int>(op->get_input_shape(0).size()));
+    int rank = std::max(4, static_cast<int>(op->get_input_partial_shape(0).size()));
     if (order.empty()) {
         // if order size is less than 4 - fill the rest with just copy
         for (int o = rank - 1; o >= 0; o--)

View File

@@ -76,10 +76,10 @@ static void CreatePReluOp(Program& p, const std::shared_ptr<ngraph::op::v0::PRel
     p.ValidateInputs(op, {2});
     auto slope_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
-    auto slope_shape = op->get_input_shape(1);
-    auto out_shape = op->get_output_shape(0);
-    if (slope_node && ngraph::shape_size(slope_shape) == 1) {
+    auto slope_shape = op->get_input_partial_shape(1);
+    auto out_shape = op->get_output_partial_shape(0);
+    if (slope_node && ngraph::shape_size(slope_shape.to_shape()) == 1) {
         float slope;
         if (!ngraph::op::util::get_single_value(slope_node, slope))
             IE_THROW() << "Unsupported parameter size in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";