Add reverse infer (#14348)
* Add reverse infer
* Add more convolutions and include transformation in normalize of tf fe
* Fix code style
* Add activations and eltwise support
* Fix code style
* Add deprecation suppression
* Fix pad type
* Remove eltwise
* Remove ngraph namespace usage
* Fix code style
* Apply review feedback
* Add tests for activations and eltwise
* Make transformation friend of tensor to avoid deprecated API
* clang fix all
* Update src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp

Co-authored-by: Roman Kazantsev <roman.kazantsev@intel.com>
commit 9c0ec2c9b4
parent 7df9031411
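For orientation, below is a minimal, hypothetical sketch of how the new pass could be applied on its own, outside the TensorFlow frontend. It is not part of this commit's diff; the helper name deduce_dynamic_input_info and the standalone setup are illustrative assumptions, while ov::pass::Manager, register_pass and run_passes are the same APIs used in the frontend change further down.

// Illustrative sketch only; assumes a model whose inputs were left with dynamic
// rank and/or element type (e.g. after frontend conversion).
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/pass/manager.hpp"
#include "transformations/common_optimizations/reverse_shape_and_type_infer.hpp"

void deduce_dynamic_input_info(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    // The pass walks the model in reverse topological order and propagates known
    // output ranks/element types back to inputs that are still dynamic.
    manager.register_pass<ov::pass::ReverseShapeAndTypeInfer>();
    manager.run_passes(model);
}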
@ -0,0 +1,30 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <openvino/pass/pass.hpp>
#include <transformations_visibility.hpp>

namespace ov {
namespace pass {

class TRANSFORMATIONS_API ReverseShapeAndTypeInfer;

}  // namespace pass
}  // namespace ov

/**
 * @brief Perform reverse shape and type infer to deduce input rank and type in certain cases
 */
class ov::pass::ReverseShapeAndTypeInfer : public ov::pass::ModelPass {
public:
    OPENVINO_RTTI("ReverseShapeAndTypeInfer", "0");
    bool run_on_model(const std::shared_ptr<ov::Model>& f) override;

private:
    bool inherit_output_shape(const std::shared_ptr<ov::Node>& node, const std::vector<size_t>& input_idxs);
    bool inherit_output_rank(const std::shared_ptr<ov::Node>& node, const std::vector<size_t>& input_idxs);
    bool inherit_output_type(const std::shared_ptr<ov::Node>& node, const std::vector<size_t>& input_idxs);
};
@ -0,0 +1,117 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "transformations/common_optimizations/reverse_shape_and_type_infer.hpp"

#include "itt.hpp"
#include "openvino/opsets/opset10.hpp"

using namespace ov::opset10;

bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_shape(const std::shared_ptr<ov::Node>& node,
                                                              const std::vector<size_t>& input_idxs) {
    auto is_changed = false;
    auto output_shape = node->get_output_partial_shape(0);

    for (auto idx : input_idxs) {
        if (node->get_input_partial_shape(idx).rank().is_dynamic()) {
            node->get_input_tensor(idx).m_partial_shape = output_shape;
            is_changed = true;
        }
    }
    return is_changed;
}

bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_rank(const std::shared_ptr<ov::Node>& node,
                                                             const std::vector<size_t>& input_idxs) {
    auto is_changed = false;
    auto output_shape = node->get_output_partial_shape(0);

    for (auto idx : input_idxs) {
        if (idx < node->get_input_size() && node->get_input_partial_shape(idx).rank().is_dynamic()) {
            node->get_input_tensor(idx).m_partial_shape = ov::PartialShape::dynamic(output_shape.rank());
            is_changed = true;
        }
    }
    return is_changed;
}

bool ov::pass::ReverseShapeAndTypeInfer::inherit_output_type(const std::shared_ptr<ov::Node>& node,
                                                             const std::vector<size_t>& input_idxs) {
    auto is_changed = false;
    auto output_type = node->get_output_element_type(0);

    for (auto idx : input_idxs) {
        if (node->get_input_element_type(idx).is_dynamic()) {
            node->get_input_tensor(idx).m_element_type = output_type;
            is_changed = true;
        }
    }
    return is_changed;
}

bool ov::pass::ReverseShapeAndTypeInfer::run_on_model(const std::shared_ptr<ov::Model>& f) {
    RUN_ON_MODEL_SCOPE(ReverseShapeAndTypeInfer);
    bool is_changed = false;
    auto ops = f->get_ordered_ops();
    for (auto it = ops.rbegin(); it != ops.rend(); ++it) {
        const auto& op = *it;
        auto output_shape = op->get_output_partial_shape(0);
        auto output_type = op->get_output_element_type(0);
        if (const auto& param = std::dynamic_pointer_cast<Parameter>(op)) {
            if (param->get_partial_shape().rank().is_dynamic()) {
                param->set_partial_shape(output_shape);
                is_changed = true;
            }
            if (param->get_element_type().is_dynamic()) {
                param->set_element_type(output_type);
                is_changed = true;
            }
        } else if (std::dynamic_pointer_cast<Convolution>(op) ||
                   std::dynamic_pointer_cast<GroupConvolutionBackpropData>(op) ||
                   std::dynamic_pointer_cast<ConvolutionBackpropData>(op) ||
                   std::dynamic_pointer_cast<GroupConvolution>(op)) {
            is_changed |= inherit_output_rank(op, {0, 1});
            is_changed |= inherit_output_type(op, {0, 1});
        } else if (std::dynamic_pointer_cast<DeformableConvolution>(op)) {
            is_changed |= inherit_output_rank(op, {0, 1, 2, 3});
            is_changed |= inherit_output_type(op, {0, 1, 2, 3});
        } else if (std::dynamic_pointer_cast<Pad>(op)) {
            // Shape of pads_begin and pads_end must match rank of input
            if (op->get_input_partial_shape(0).rank().is_dynamic()) {
                auto pads_begin_shape = op->get_input_partial_shape(1);
                auto pads_end_shape = op->get_input_partial_shape(2);
                if (pads_begin_shape.is_static() && pads_begin_shape.size() > 0) {
                    op->get_input_tensor(0).m_partial_shape = PartialShape::dynamic(pads_begin_shape[0]);
                    is_changed = true;
                } else if (pads_end_shape.is_static() && pads_end_shape.size() > 0) {
                    op->get_input_tensor(0).m_partial_shape = PartialShape::dynamic(pads_end_shape[0]);
                    is_changed = true;
                }
            }
            is_changed |= inherit_output_type(op, {0});
        } else if (std::dynamic_pointer_cast<op::util::UnaryElementwiseArithmetic>(op)) {
            is_changed |= inherit_output_shape(op, {0});
            is_changed |= inherit_output_type(op, {0});
        } else if (const auto& eltwise = std::dynamic_pointer_cast<op::util::BinaryElementwiseArithmetic>(op)) {
            if (output_shape.rank().is_static()) {
                auto in0_rank = op->get_input_partial_shape(0).rank();
                auto in1_rank = op->get_input_partial_shape(1).rank();
                if (in0_rank.is_dynamic() && in1_rank.is_static()) {
                    if (eltwise->get_autob() == ov::op::AutoBroadcastType::NONE)
                        op->get_input_tensor(0).m_partial_shape = output_shape;
                    else if (in1_rank.get_length() < output_shape.rank().get_length())
                        op->get_input_tensor(0).m_partial_shape = PartialShape::dynamic(output_shape.rank());
                } else if (in1_rank.is_dynamic() && in0_rank.is_static()) {
                    if (eltwise->get_autob() == ov::op::AutoBroadcastType::NONE)
                        op->get_input_tensor(1).m_partial_shape = output_shape;
                    else if (in0_rank.get_length() < output_shape.rank().get_length())
                        op->get_input_tensor(1).m_partial_shape = PartialShape::dynamic(output_shape.rank());
                }
            }
            is_changed |= inherit_output_type(op, {0, 1});
        }
    }
    return is_changed;
}
@ -0,0 +1,295 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>

#include <openvino/opsets/opset10.hpp>
#include <transformations/common_optimizations/reverse_shape_and_type_infer.hpp>

#include "common_test_utils/ngraph_test_utils.hpp"

using namespace testing;
using namespace ov;

TEST_F(TransformationTestsF, ConvolutionReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(data,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(data,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, ConvolutionBackpropDataReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto weights =
            opset10::Constant::create(element::f32, Shape{20, 10, 3, 3}, std::vector<float>(20 * 10 * 3 * 3, 0.1));
        auto conv = std::make_shared<opset10::ConvolutionBackpropData>(data,
                                                                       weights,
                                                                       Strides{2, 2},
                                                                       CoordinateDiff{1, 1},
                                                                       CoordinateDiff{1, 1},
                                                                       Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto weights =
            opset10::Constant::create(element::f32, Shape{20, 10, 3, 3}, std::vector<float>(20 * 10 * 3 * 3, 0.1));
        auto conv = std::make_shared<opset10::ConvolutionBackpropData>(data,
                                                                       weights,
                                                                       Strides{2, 2},
                                                                       CoordinateDiff{1, 1},
                                                                       CoordinateDiff{1, 1},
                                                                       Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, GroupConvolutionReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto weights =
            opset10::Constant::create(element::f32, Shape{3, 2, 3, 7, 7}, std::vector<float>(3 * 2 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::GroupConvolution>(data,
                                                                weights,
                                                                Strides{1, 1},
                                                                CoordinateDiff{1, 1},
                                                                CoordinateDiff{1, 1},
                                                                Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto weights =
            opset10::Constant::create(element::f32, Shape{3, 2, 3, 7, 7}, std::vector<float>(3 * 2 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::GroupConvolution>(data,
                                                                weights,
                                                                Strides{1, 1},
                                                                CoordinateDiff{1, 1},
                                                                CoordinateDiff{1, 1},
                                                                Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, GroupConvolutionBackpropDataReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto weights =
            opset10::Constant::create(element::f32, Shape{4, 5, 2, 3, 3}, std::vector<float>(4 * 5 * 2 * 3 * 3, 0.1));
        auto conv = std::make_shared<opset10::GroupConvolutionBackpropData>(data,
                                                                            weights,
                                                                            Strides{2, 2},
                                                                            CoordinateDiff{1, 1},
                                                                            CoordinateDiff{1, 1},
                                                                            Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto weights =
            opset10::Constant::create(element::f32, Shape{4, 5, 2, 3, 3}, std::vector<float>(4 * 5 * 2 * 3 * 3, 0.1));
        auto conv = std::make_shared<opset10::GroupConvolutionBackpropData>(data,
                                                                            weights,
                                                                            Strides{2, 2},
                                                                            CoordinateDiff{1, 1},
                                                                            CoordinateDiff{1, 1},
                                                                            Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, DeformableConvolutionReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto offsets = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 4, 5, 5}, std::vector<float>(64 * 4 * 5 * 5, 0.1));
        auto mask = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto conv = std::make_shared<opset10::DeformableConvolution>(data,
                                                                     offsets,
                                                                     weights,
                                                                     mask,
                                                                     Strides{1, 1},
                                                                     CoordinateDiff{0, 0},
                                                                     CoordinateDiff{0, 0},
                                                                     Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data, offsets, mask});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto offsets = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 4, 5, 5}, std::vector<float>(64 * 4 * 5 * 5, 0.1));
        auto mask = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto conv = std::make_shared<opset10::DeformableConvolution>(data,
                                                                     offsets,
                                                                     weights,
                                                                     mask,
                                                                     Strides{1, 1},
                                                                     CoordinateDiff{0, 0},
                                                                     CoordinateDiff{0, 0},
                                                                     Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data, offsets, mask});
    }
}

TEST_F(TransformationTestsF, PadReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto pads_begin = opset10::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
        auto pads_end = opset10::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
        auto value = opset10::Constant::create(element::f32, Shape{}, {0});
        auto pad = std::make_shared<opset10::Pad>(data, pads_begin, pads_end, value, op::PadMode::CONSTANT);
        auto result = std::make_shared<opset10::Result>(pad);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto pads_begin = opset10::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
        auto pads_end = opset10::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
        auto value = opset10::Constant::create(element::f32, Shape{}, {0});
        auto pad = std::make_shared<opset10::Pad>(data, pads_begin, pads_end, value, op::PadMode::CONSTANT);
        auto result = std::make_shared<opset10::Result>(pad);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, ActivationReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto relu = std::make_shared<opset10::Relu>(data);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(relu,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto relu = std::make_shared<opset10::Relu>(data);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(relu,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, EltwiseScalarRightReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto add_const = opset10::Constant::create(element::f32, Shape{}, {1});
        auto add = std::make_shared<opset10::Add>(data, add_const);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(add,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto add_const = opset10::Constant::create(element::f32, Shape{}, {1});
        auto add = std::make_shared<opset10::Add>(data, add_const);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(add,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

TEST_F(TransformationTestsF, EltwiseScalarLeftReverseInfer) {
    {
        auto data = std::make_shared<opset10::Parameter>(element::dynamic, PartialShape::dynamic());
        auto add_const = opset10::Constant::create(element::f32, Shape{}, {1});
        auto add = std::make_shared<opset10::Add>(add_const, data);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(add,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
        manager.register_pass<pass::ReverseShapeAndTypeInfer>();
    }
    {
        auto data = std::make_shared<opset10::Parameter>(element::f32, PartialShape::dynamic(4));
        auto add_const = opset10::Constant::create(element::f32, Shape{}, {1});
        auto add = std::make_shared<opset10::Add>(add_const, data);
        auto weights =
            opset10::Constant::create(element::f32, Shape{64, 3, 7, 7}, std::vector<float>(64 * 3 * 7 * 7, 0.1));
        auto conv = std::make_shared<opset10::Convolution>(add,
                                                           weights,
                                                           Strides{2, 2},
                                                           CoordinateDiff{3, 3},
                                                           CoordinateDiff{3, 3},
                                                           Strides{1, 1});
        auto result = std::make_shared<opset10::Result>(conv);
        model_ref = std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
    }
}

@ -27,6 +27,9 @@ using HostTensorPtr = std::shared_ptr<runtime::HostTensor>;
namespace ov {
class Node;
using TensorLabel = std::vector<size_t>;
namespace pass {
class ReverseShapeAndTypeInfer;
}
namespace descriptor {

class Tensor;

@ -137,6 +140,7 @@ protected:

    friend OPENVINO_API std::string get_ov_tensor_legacy_name(const Tensor& tensor);
    friend OPENVINO_API void set_ov_tensor_legacy_name(Tensor& tensor, const std::string& tensor_name);
    friend class pass::ReverseShapeAndTypeInfer;
};

OPENVINO_API
@ -66,16 +66,24 @@ bool ngraph::op::v1::Pad::visit_attributes(AttributeVisitor& visitor) {
 void op::v1::Pad::validate_and_infer_types() {
     OV_OP_SCOPE(v1_Pad_validate_and_infer_types);
-    element::Type result_et;
+    element::Type result_et = element::dynamic;

     const auto& arg_element_type = get_input_element_type(0);
     const auto& pads_begin_element_type = get_input_element_type(1);
     const auto& pads_end_element_type = get_input_element_type(2);

+    NODE_VALIDATION_CHECK(this,
+                          element::Type::merge(result_et, result_et, arg_element_type),
+                          "Cannot merge element types (input arg element type: ",
+                          arg_element_type,
+                          ", with: ",
+                          result_et,
+                          ").");
+
     if (m_pad_mode == PadMode::CONSTANT && get_input_size() == 4) {
         const auto& arg_pad_element_type = get_input_element_type(3);
         NODE_VALIDATION_CHECK(this,
-                              element::Type::merge(result_et, arg_element_type, arg_pad_element_type),
+                              element::Type::merge(result_et, result_et, arg_pad_element_type),
                               "Argument element types do not match (input arg element type: ",
                               arg_element_type,
                               ", arg_pad element type: ",
@ -100,7 +108,7 @@ void op::v1::Pad::validate_and_infer_types() {
     for (size_t i = 0; i < get_input_size(); i++)
         input_shapes.push_back(get_input_partial_shape(i));
     shape_infer(this, input_shapes, output_shapes);
-    set_output_type(0, get_input_element_type(0), output_shapes[0]);
+    set_output_type(0, result_et, output_shapes[0]);
 }

 shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args) const {
@ -231,3 +231,13 @@ TEST(type_prop, pad_v1_any_dim_for_padding_edge) {
    auto pad = make_shared<op::v1::Pad>(arg, pads_begin, pads_end, op::PadMode::EDGE);
    ASSERT_TRUE(pad->get_output_partial_shape(0).same_scheme(PartialShape{2, 53, Dimension::dynamic(), 1}));
}

TEST(type_prop, pad_v1_dynamic_input_type_with_static_value) {
    auto arg = make_shared<op::Parameter>(element::dynamic, Shape{1, 2, 3});
    auto pads_begin = make_shared<op::Parameter>(element::i32, Shape{1});
    auto pads_end = make_shared<op::Parameter>(element::i32, Shape{1});
    auto arg_pad_value = op::Constant::create(element::f32, Shape{}, {0});

    auto pad = make_shared<op::v1::Pad>(arg, pads_begin, pads_end, arg_pad_value, op::PadMode::CONSTANT);
    ASSERT_EQ(pad->get_output_element_type(0), element::f32);
}
@ -18,6 +18,7 @@
#include "pass/transpose_sinking.hpp"
#include "so_extension.hpp"
#include "tf_framework_node.hpp"
#include "transformations/common_optimizations/reverse_shape_and_type_infer.hpp"
#include "utils.hpp"

using namespace ov;

@ -442,6 +443,7 @@ void FrontEnd::normalize(const std::shared_ptr<ov::Model>& function) const {

    // TODO: reimplement TransposeSinking that does not corrupt filters for Convolution
    manager.register_pass<ov::frontend::tensorflow::pass::TransposeSinking>();
    manager.register_pass<ov::pass::ReverseShapeAndTypeInfer>();
    manager.run_passes(function);
}