[PT FE] Enable reverse infer (#15802)
* Enable reverse infer in PT FE * Inherit channels from weight of convolution * Except 1 * Add tests * Add shape propagation for concat
This commit is contained in:
parent
5d3cd81fd1
commit
cbd56c3ed9
@ -15,7 +15,7 @@ bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_shape(const std::shared_
|
||||
auto output_shape = node->get_output_partial_shape(0);
|
||||
|
||||
for (auto idx : input_idxs) {
|
||||
if (node->get_input_partial_shape(idx).rank().is_dynamic()) {
|
||||
if (idx < node->get_input_size() && node->get_input_partial_shape(idx).rank().is_dynamic()) {
|
||||
node->get_input_tensor(idx).m_partial_shape = output_shape;
|
||||
is_changed = true;
|
||||
}
|
||||
@ -43,7 +43,7 @@ bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_type(const std::shared_p
|
||||
auto output_type = node->get_output_element_type(0);
|
||||
|
||||
for (auto idx : input_idxs) {
|
||||
if (node->get_input_element_type(idx).is_dynamic()) {
|
||||
if (idx < node->get_input_size() && node->get_input_element_type(idx).is_dynamic()) {
|
||||
node->get_input_tensor(idx).m_element_type = output_type;
|
||||
is_changed = true;
|
||||
}
|
||||
@ -68,11 +68,41 @@ bool ov::pass::ReverseShapeAndTypeInfer::run_on_model(const std::shared_ptr<ov::
|
||||
param->set_element_type(output_type);
|
||||
is_changed = true;
|
||||
}
|
||||
} else if (std::dynamic_pointer_cast<Convolution>(op) ||
|
||||
std::dynamic_pointer_cast<GroupConvolutionBackpropData>(op) ||
|
||||
std::dynamic_pointer_cast<ConvolutionBackpropData>(op) ||
|
||||
std::dynamic_pointer_cast<GroupConvolution>(op)) {
|
||||
} else if (std::dynamic_pointer_cast<Convolution>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0, 1});
|
||||
// Inherit channels from weights
|
||||
const auto& weigths_pshape = op->get_input_partial_shape(1);
|
||||
if (weigths_pshape.rank().is_static() && op->get_input_partial_shape(1).rank().is_static() &&
|
||||
weigths_pshape[1] != 1) {
|
||||
op->get_input_tensor(0).m_partial_shape[1] = weigths_pshape[1];
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0, 1});
|
||||
} else if (std::dynamic_pointer_cast<GroupConvolution>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0, 1});
|
||||
// Inherit channels from weights
|
||||
const auto& weigths_pshape = op->get_input_partial_shape(1);
|
||||
if (weigths_pshape.rank().is_static() && op->get_input_partial_shape(1).rank().is_static() &&
|
||||
weigths_pshape[2] != 1) {
|
||||
op->get_input_tensor(0).m_partial_shape[1] = weigths_pshape[0] * weigths_pshape[2];
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0, 1});
|
||||
} else if (std::dynamic_pointer_cast<ConvolutionBackpropData>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0, 1});
|
||||
// Inherit channels from weights
|
||||
const auto& weigths_pshape = op->get_input_partial_shape(1);
|
||||
if (weigths_pshape.rank().is_static() && op->get_input_partial_shape(1).rank().is_static() &&
|
||||
weigths_pshape[0] != 1) {
|
||||
op->get_input_tensor(0).m_partial_shape[1] = weigths_pshape[0];
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0, 1});
|
||||
} else if (std::dynamic_pointer_cast<GroupConvolutionBackpropData>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0, 1});
|
||||
// Inherit channels from weights
|
||||
const auto& weigths_pshape = op->get_input_partial_shape(1);
|
||||
if (weigths_pshape.rank().is_static() && op->get_input_partial_shape(1).rank().is_static() &&
|
||||
weigths_pshape[1] != 1) {
|
||||
op->get_input_tensor(0).m_partial_shape[1] = weigths_pshape[0] * weigths_pshape[1];
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0, 1});
|
||||
} else if (std::dynamic_pointer_cast<DeformableConvolution>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0, 1, 2, 3});
|
||||
@ -111,6 +141,114 @@ bool ov::pass::ReverseShapeAndTypeInfer::run_on_model(const std::shared_ptr<ov::
|
||||
}
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0, 1});
|
||||
} else if (const auto& concat = std::dynamic_pointer_cast<Concat>(op)) {
|
||||
std::vector<size_t> input_idxs(op->get_input_size());
|
||||
std::iota(input_idxs.begin(), input_idxs.end(), 0);
|
||||
|
||||
auto axis = concat->get_axis();
|
||||
if (output_shape.rank().is_static()) {
|
||||
if (axis < 0) {
|
||||
axis = output_shape.rank().get_length() + axis;
|
||||
}
|
||||
auto input_pshape = output_shape;
|
||||
input_pshape[axis] = Dimension::dynamic();
|
||||
for (auto idx : input_idxs) {
|
||||
if (idx < op->get_input_size() && op->get_input_partial_shape(idx).rank().is_dynamic()) {
|
||||
op->get_input_tensor(idx).m_partial_shape = input_pshape;
|
||||
is_changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
is_changed |= inherit_output_type(op, input_idxs);
|
||||
} else if (std::dynamic_pointer_cast<Slice>(op)) {
|
||||
is_changed |= inherit_output_rank(op, {0});
|
||||
is_changed |= inherit_output_type(op, {0});
|
||||
} else if (std::dynamic_pointer_cast<Squeeze>(op)) {
|
||||
auto in0_rank = op->get_input_partial_shape(0).rank();
|
||||
if (output_shape.rank().is_static() && in0_rank.is_dynamic() && op->get_input_size() > 1) {
|
||||
auto in1_pshape = op->get_input_partial_shape(1);
|
||||
if (in1_pshape.is_static()) {
|
||||
auto num_dims = in1_pshape.size() == 0 ? 1 : in1_pshape[0].get_length();
|
||||
op->get_input_tensor(0).m_partial_shape =
|
||||
PartialShape::dynamic(output_shape.rank().get_length() + num_dims);
|
||||
}
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0});
|
||||
} else if (std::dynamic_pointer_cast<Unsqueeze>(op)) {
|
||||
auto in0_rank = op->get_input_partial_shape(0).rank();
|
||||
auto in1_pshape = op->get_input_partial_shape(1);
|
||||
if (output_shape.rank().is_static() && in0_rank.is_dynamic() && in1_pshape.is_static()) {
|
||||
auto num_dims = in1_pshape.size() == 0 ? 1 : in1_pshape[0].get_length();
|
||||
op->get_input_tensor(0).m_partial_shape =
|
||||
PartialShape::dynamic(output_shape.rank().get_length() - num_dims);
|
||||
}
|
||||
is_changed |= inherit_output_type(op, {0});
|
||||
} else if (const auto& if_op = std::dynamic_pointer_cast<If>(op)) {
|
||||
auto then_body = if_op->get_then_body();
|
||||
auto else_body = if_op->get_else_body();
|
||||
// First set types and shapes to Result nodes
|
||||
const auto& then_body_results = then_body->get_results();
|
||||
const auto& else_body_results = else_body->get_results();
|
||||
const auto& then_out_desc = if_op->get_output_descriptions(If::THEN_BODY_INDEX);
|
||||
const auto& else_out_desc = if_op->get_output_descriptions(If::ELSE_BODY_INDEX);
|
||||
for (const auto& out_desc : then_out_desc) {
|
||||
const auto& out_indx = out_desc->m_output_index;
|
||||
const auto& body_indx = out_desc->m_body_value_index;
|
||||
then_body_results[body_indx]->get_input_tensor(0).m_partial_shape =
|
||||
if_op->get_output_partial_shape(out_indx);
|
||||
then_body_results[body_indx]->get_input_tensor(0).m_element_type =
|
||||
if_op->get_output_element_type(out_indx);
|
||||
}
|
||||
for (const auto& out_desc : else_out_desc) {
|
||||
const auto& out_indx = out_desc->m_output_index;
|
||||
const auto& body_indx = out_desc->m_body_value_index;
|
||||
else_body_results[body_indx]->get_input_tensor(0).m_partial_shape =
|
||||
if_op->get_output_partial_shape(out_indx);
|
||||
else_body_results[body_indx]->get_input_tensor(0).m_element_type =
|
||||
if_op->get_output_element_type(out_indx);
|
||||
}
|
||||
is_changed |= run_on_model(then_body);
|
||||
is_changed |= run_on_model(else_body);
|
||||
auto then_body_params = then_body->get_parameters();
|
||||
auto else_body_params = else_body->get_parameters();
|
||||
const auto& then_in_desc = if_op->get_input_descriptions(If::THEN_BODY_INDEX);
|
||||
const auto& else_in_desc = if_op->get_input_descriptions(If::ELSE_BODY_INDEX);
|
||||
for (const auto& in_desc : then_in_desc) {
|
||||
const auto& in_indx = in_desc->m_input_index;
|
||||
const auto& body_indx = in_desc->m_body_parameter_index;
|
||||
if (if_op->get_input_tensor(in_indx).get_partial_shape().rank().is_dynamic()) {
|
||||
if_op->get_input_tensor(in_indx).m_partial_shape =
|
||||
then_body_params.at(body_indx)->get_partial_shape();
|
||||
is_changed = true;
|
||||
}
|
||||
if (if_op->get_input_tensor(in_indx).get_element_type().is_dynamic()) {
|
||||
if_op->get_input_tensor(in_indx).m_element_type =
|
||||
then_body_params.at(body_indx)->get_element_type();
|
||||
is_changed = true;
|
||||
}
|
||||
}
|
||||
for (const auto& in_desc : else_in_desc) {
|
||||
const auto& in_indx = in_desc->m_input_index;
|
||||
const auto& body_indx = in_desc->m_body_parameter_index;
|
||||
if (if_op->get_input_tensor(in_indx).get_partial_shape().rank().is_dynamic()) {
|
||||
if_op->get_input_tensor(in_indx).m_partial_shape =
|
||||
else_body_params.at(body_indx)->get_partial_shape();
|
||||
is_changed = true;
|
||||
}
|
||||
if (if_op->get_input_tensor(in_indx).get_element_type().is_dynamic()) {
|
||||
if_op->get_input_tensor(in_indx).m_element_type =
|
||||
else_body_params.at(body_indx)->get_element_type();
|
||||
is_changed = true;
|
||||
}
|
||||
}
|
||||
// Set type for If condition
|
||||
if (if_op->get_input_element_type(0).is_dynamic()) {
|
||||
if_op->get_input_tensor(0).m_element_type = element::boolean;
|
||||
is_changed = true;
|
||||
}
|
||||
} else if (std::dynamic_pointer_cast<ConvertLike>(op)) {
|
||||
is_changed |= inherit_output_shape(op, {0});
|
||||
is_changed |= inherit_output_type(op, {1});
|
||||
}
|
||||
}
|
||||
return is_changed;
|
||||
|
@ -28,7 +28,7 @@ TEST_F(TransformationTestsF, ConvolutionReverseInfer) {
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 3, DYN, DYN});
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(data,
|
||||
@ -58,7 +58,7 @@ TEST_F(TransformationTestsF, ConvolutionBackpropDataReverseInfer) {
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 20, DYN, DYN});
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{20, 10, 3, 3}, std::vector<float>(20 * 10 * 3 * 3, 0.1f));
|
||||
auto conv = std::make_shared<opset10::ConvolutionBackpropData>(data,
|
||||
@ -88,7 +88,7 @@ TEST_F(TransformationTestsF, GroupConvolutionReverseInfer) {
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 9, DYN, DYN});
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{3, 2, 3, 7, 7}, std::vector<float>(3 * 2 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::GroupConvolution>(data,
|
||||
@ -119,7 +119,7 @@ TEST_F(TransformationTestsF, GroupConvolutionBackpropDataReverseInfer) {
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 20, DYN, DYN});
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{4, 5, 2, 3, 3}, std::vector<float>(4 * 5 * 2 * 3 * 3, 0.1f));
|
||||
auto conv = std::make_shared<opset10::GroupConvolutionBackpropData>(data,
|
||||
@ -211,7 +211,7 @@ TEST_F(TransformationTestsF, ActivationReverseInfer) {
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 3, DYN, DYN});
|
||||
auto relu = std::make_shared<opset10::Relu>(data);
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
@ -293,3 +293,243 @@ TEST_F(TransformationTestsF, EltwiseScalarLeftReverseInfer) {
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, ConcatReverseInfer) {
|
||||
{
|
||||
auto data1 = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
// Specify rank and type in one of Concat input to inherit in another
|
||||
auto data2 = std::make_shared<opset10::Parameter>(element::f32, PartialShape{1, 3, 224, 224});
|
||||
auto concat = std::make_shared<opset10::Concat>(OutputVector{data1, data2}, 0);
|
||||
auto result = std::make_shared<opset10::Result>(concat);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data1, data2});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data1 = std::make_shared<opset10::Parameter>(element::f32, PartialShape{DYN, 3, 224, 224});
|
||||
auto data2 = std::make_shared<opset10::Parameter>(element::f32, PartialShape{1, 3, 224, 224});
|
||||
auto concat = std::make_shared<opset10::Concat>(OutputVector{data1, data2}, 0);
|
||||
auto result = std::make_shared<opset10::Result>(concat);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data1, data2});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, SliceReverseInfer) {
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto start = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto stop = opset10::Constant::create(element::i32, Shape{1}, {1});
|
||||
auto step = opset10::Constant::create(element::i32, Shape{1}, {1});
|
||||
auto axis = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto slice = std::make_shared<opset10::Slice>(data, start, stop, step, axis);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(slice,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto start = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto stop = opset10::Constant::create(element::i32, Shape{1}, {1});
|
||||
auto step = opset10::Constant::create(element::i32, Shape{1}, {1});
|
||||
auto axis = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto slice = std::make_shared<opset10::Slice>(data, start, stop, step, axis);
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(slice,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, SqueezeReverseInfer) {
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto axes = opset10::Constant::create(element::i32, Shape{2}, {0, 1});
|
||||
auto squeeze = std::make_shared<opset10::Squeeze>(data, axes);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(squeeze,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(6));
|
||||
auto axes = opset10::Constant::create(element::i32, Shape{2}, {0, 1});
|
||||
auto squeeze = std::make_shared<opset10::Squeeze>(data, axes);
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(squeeze,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, UnsqueezeReverseInfer) {
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto axes = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto unsqueeze = std::make_shared<opset10::Unsqueeze>(data, axes);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(unsqueeze,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(3));
|
||||
auto axes = opset10::Constant::create(element::i32, Shape{1}, {0});
|
||||
auto unsqueeze = std::make_shared<opset10::Unsqueeze>(data, axes);
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(unsqueeze,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, ConvertLikeReverseInfer) {
|
||||
{
|
||||
// One input has static rank and another has static type
|
||||
auto data1 = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto data2 = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto convert_like = std::make_shared<opset10::ConvertLike>(data1, data2);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(convert_like,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data1, data2});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto data1 = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape{DYN, 3, DYN, DYN});
|
||||
auto data2 = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic());
|
||||
auto convert_like = std::make_shared<opset10::ConvertLike>(data1, data2);
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(convert_like,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data1, data2});
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransformationTestsF, IfReverseInfer) {
|
||||
{
|
||||
auto X = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto cond = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
|
||||
// Body parameters
|
||||
auto Xt = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
auto Xe = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
|
||||
// Body
|
||||
auto one = opset10::Constant::create(element::f32, Shape{}, {1});
|
||||
auto then_op = std::make_shared<opset10::Add>(Xt, one);
|
||||
auto then_op_res = std::make_shared<opset10::Result>(then_op);
|
||||
auto then_body = std::make_shared<Model>(OutputVector{then_op_res}, ParameterVector{Xt});
|
||||
|
||||
auto neg_one = opset10::Constant::create(element::f32, Shape{}, {-1});
|
||||
auto else_op = std::make_shared<opset10::Add>(Xe, neg_one);
|
||||
auto else_op_res = std::make_shared<opset10::Result>(else_op);
|
||||
auto else_body = std::make_shared<Model>(OutputVector{else_op_res}, ParameterVector{Xe});
|
||||
|
||||
auto if_op = std::make_shared<opset10::If>(cond);
|
||||
if_op->set_then_body(then_body);
|
||||
if_op->set_else_body(else_body);
|
||||
if_op->set_input(X, Xt, Xe);
|
||||
auto res = if_op->set_output(then_op_res, else_op_res);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(res,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model = std::make_shared<Model>(ResultVector{result}, ParameterVector{X, cond});
|
||||
manager.register_pass<pass::ReverseShapeAndTypeInfer>();
|
||||
}
|
||||
{
|
||||
auto X = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto cond = std::make_shared<opset10::Parameter>(element::boolean, PartialShape::dynamic());
|
||||
|
||||
// Body parameters
|
||||
auto Xt = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
auto Xe = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
|
||||
// Body
|
||||
auto one = opset10::Constant::create(element::f32, Shape{}, {1});
|
||||
auto then_op = std::make_shared<opset10::Add>(Xt, one);
|
||||
auto then_op_res = std::make_shared<opset10::Result>(then_op);
|
||||
auto then_body = std::make_shared<Model>(OutputVector{then_op_res}, ParameterVector{Xt});
|
||||
|
||||
auto neg_one = opset10::Constant::create(element::f32, Shape{}, {-1});
|
||||
auto else_op = std::make_shared<opset10::Add>(Xe, neg_one);
|
||||
auto else_op_res = std::make_shared<opset10::Result>(else_op);
|
||||
auto else_body = std::make_shared<Model>(OutputVector{else_op_res}, ParameterVector{Xe});
|
||||
|
||||
auto if_op = std::make_shared<opset10::If>(cond);
|
||||
if_op->set_then_body(then_body);
|
||||
if_op->set_else_body(else_body);
|
||||
if_op->set_input(X, Xt, Xe);
|
||||
auto res = if_op->set_output(then_op_res, else_op_res);
|
||||
// Convolution is needed to produce static rank
|
||||
auto weights =
|
||||
opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1f));
|
||||
auto conv = std::make_shared<opset10::Convolution>(res,
|
||||
weights,
|
||||
Strides{2, 2},
|
||||
CoordinateDiff{3, 3},
|
||||
CoordinateDiff{3, 3},
|
||||
Strides{1, 1});
|
||||
auto result = std::make_shared<opset10::Result>(conv);
|
||||
model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{X, cond});
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "pt_framework_node.hpp"
|
||||
#include "transformations/common_optimizations/push_constant_to_subgraph.hpp"
|
||||
#include "transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp"
|
||||
#include "transformations/common_optimizations/reverse_shape_and_type_infer.hpp"
|
||||
#include "transformations/control_flow/unroll_if.hpp"
|
||||
#include "transforms.hpp"
|
||||
#include "transforms/append_list_unpack_replacer.hpp"
|
||||
@ -99,6 +100,7 @@ void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
|
||||
manager.register_pass<ov::frontend::pytorch::pass::MinMaxPrimListConstructReplacer>();
|
||||
manager.register_pass<ov::frontend::pytorch::pass::DecomposeListTupleResults>();
|
||||
manager.register_pass<ov::pass::RemoveMultiSubGraphOpDanglingParams>();
|
||||
manager.register_pass<ov::pass::ReverseShapeAndTypeInfer>();
|
||||
|
||||
manager.run_passes(model);
|
||||
|
||||
|
@ -277,8 +277,8 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr<const Node> &node, con
|
||||
|
||||
// Matmul / FC bias fusion
|
||||
if (ov::is_type<ngraph::opset1::Add>(node) &&
|
||||
bias_shape.is_static() && matmul_shape.rbegin()->is_static() &&
|
||||
bias_shape.rbegin()->get_length() == matmul_shape.rbegin()->get_length() &&
|
||||
bias_shape.is_static() &&
|
||||
bias_shape.rbegin()->get_length() == shape_size(bias_shape.get_shape())) {
|
||||
return true;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user