Add tests for TS backward transformations, update the TransposeSinkingFuse transformation, delete the StridedSlice transformation prototype, and refactor tests

Ivan 2023-03-09 01:33:46 +04:00
parent db09fe4965
commit b60015f90d
20 changed files with 593 additions and 722 deletions

View File

@ -1,30 +0,0 @@
// Copyright (C) 2022-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/pass/graph_rewrite.hpp"
#include "openvino/pass/pass.hpp"
#include "transformations_visibility.hpp"
namespace ov {
namespace pass {
class TRANSFORMATIONS_API TransposeSinkingStridedSliceForward;
class TRANSFORMATIONS_API TransposeSinkingStridedSliceBackward;
} // namespace pass
} // namespace ov
class ov::pass::TransposeSinkingStridedSliceForward : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ov::pass::TransposeSinkingStridedSliceForward", "0");
TransposeSinkingStridedSliceForward();
};
class ov::pass::TransposeSinkingStridedSliceBackward : public ov::pass::MatcherPass {
public:
OPENVINO_RTTI("ov::pass::TransposeSinkingStridedSliceBackward", "0");
TransposeSinkingStridedSliceBackward();
};

View File

@ -102,7 +102,7 @@ ov::pass::TransposeSinkingDataMovementBackward::TransposeSinkingDataMovementBack
// remove output transposes
RemoveSingleOutputConsumers(main_node);
SwapNames(main_node, transpose);
const auto transpose_axis_order = transpose_const->get_axis_vector_val();
auto axis = std::make_shared<Constant>(element::i32, Shape{}, std::vector<int32_t>{0});

View File

@ -19,75 +19,55 @@ using namespace opset10;
ov::pass::TransposeSinkingFuse::TransposeSinkingFuse() {
MATCHER_SCOPE(TransposeFuse);
auto transpose_label = pattern::wrap_type<Transpose>({pattern::any_input(), pattern::wrap_type<Constant>()});
auto transpose_1_label = pattern::wrap_type<Transpose>({pattern::any_input(), pattern::wrap_type<Constant>()}, transpose_sinking::HasSameOutputTransposeNodes);
auto transpose_2_label = pattern::wrap_type<Transpose>({transpose_1_label, pattern::wrap_type<Constant>()});
ov::matcher_pass_callback matcher_pass_callback = [=](pattern::Matcher& m) {
const auto& pattern_to_output = m.get_pattern_map();
auto transpose_1 = pattern_to_output.at(transpose_label);
auto order_const_1 = std::dynamic_pointer_cast<Constant>(transpose_1->input_value(1).get_node_shared_ptr());
auto consumers = transpose_1->get_output_target_inputs(0);
std::vector<int64_t> saved_order_values;
auto saved_type = order_const_1->get_element_type();
for (const auto& it : consumers) {
auto out_transpose = dynamic_cast<Transpose*>(it.get_node());
if (!out_transpose) {
return false;
}
auto transpose1 = pattern_to_output.at(transpose_1_label);
auto transpose2 = pattern_to_output.at(transpose_2_label);
auto input = transpose1->input_value(0);
auto order = out_transpose->input_value(1).get_node_shared_ptr();
auto order_const = std::dynamic_pointer_cast<Constant>(order);
if (!order_const) {
return false;
}
auto order_values = order_const->cast_vector<int64_t>();
if (order_values.empty()) {
return false;
}
if (saved_order_values.empty()) {
saved_order_values = order_values;
} else {
if (saved_order_values != order_values) {
return false;
}
}
if (order_const->get_element_type() != saved_type) {
saved_type = element::i64;
}
}
auto order1 = order_const_1->cast_vector<int64_t>();
if (order1.size() != saved_order_values.size()) {
auto transpose1_order = std::dynamic_pointer_cast<Constant>(transpose1->get_input_node_shared_ptr(1));
auto transpose2_order = std::dynamic_pointer_cast<Constant>(transpose2->get_input_node_shared_ptr(1));
if (!transpose1_order || !transpose2_order)
return false;
auto order1 = transpose1_order->cast_vector<int64_t>();
auto order2 = transpose2_order->cast_vector<int64_t>();
if (order1.size() != order2.size())
return false;
}
bool is_ordered = true;
for (size_t i = 0; i < order1.size(); i++) {
saved_order_values[i] = order1[saved_order_values[i]];
if (saved_order_values[i] != (int64_t)i)
order2[i] = order1[order2[i]];
if (order2[i] != static_cast<int64_t>(i))
is_ordered = false;
}
auto transpose_order_type = transpose1_order->get_element_type();
if (transpose_order_type != transpose2_order->get_element_type())
transpose_order_type = element::i64;
if (is_ordered) {
for (const auto& it : consumers) {
it.get_node()->output(0).replace(transpose_1->input_value(0));
for (const auto& out_transpose : transpose1->output(0).get_target_inputs()) {
ov::replace_output_update_name(out_transpose.get_node()->output(0), input);
}
} else {
auto new_order = Constant::create(saved_type, {saved_order_values.size()}, saved_order_values);
auto new_transpose = register_new_node<Transpose>(transpose_1->input_value(0), new_order);
for (const auto& it : consumers) {
new_transpose->set_friendly_name(it.get_node()->get_friendly_name());
it.get_node()->output(0).replace(new_transpose);
copy_runtime_info(transpose_1, new_transpose);
}
auto new_order = Constant::create(transpose_order_type, {order2.size()}, order2);
auto new_transpose = register_new_node<Transpose>(input, new_order);
new_transpose->set_friendly_name(m.get_match_root()->get_friendly_name());
transpose_sinking::RemoveSingleOutputConsumers(transpose1);
copy_runtime_info(transpose1, new_transpose);
ngraph::replace_node(transpose1, new_transpose);
transpose_sinking::UpdateForwardSinkingAbility(new_transpose);
}
return true;
};
auto m = std::make_shared<pattern::Matcher>(transpose_label, matcher_name);
auto m = std::make_shared<pattern::Matcher>(transpose_2_label, matcher_name);
register_matcher(m, matcher_pass_callback);
}
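
For reference, the order-composition step above (order2[i] = order1[order2[i]]) can be reproduced outside the transformation. The following standalone sketch (hypothetical helper name, not part of the pass) shows how two consecutive transpose orders fold into one and when the pair cancels out entirely:

#include <cassert>
#include <cstdint>
#include <vector>

// Applying a Transpose with `first` and then one with `second` is
// equivalent to a single Transpose with fused[i] = first[second[i]].
std::vector<int64_t> fuse_transpose_orders(const std::vector<int64_t>& first,
                                           const std::vector<int64_t>& second) {
    std::vector<int64_t> fused(second.size());
    for (size_t i = 0; i < second.size(); ++i) {
        fused[i] = first[second[i]];
    }
    return fused;
}

int main() {
    const std::vector<int64_t> order1{0, 2, 3, 1};  // NCHW -> NHWC
    const std::vector<int64_t> order2{0, 3, 1, 2};  // NHWC -> NCHW
    // The fused order is the identity {0, 1, 2, 3}, so the pass removes
    // both Transposes; for any other result it inserts a single Transpose
    // with the fused order instead of the pair.
    assert(fuse_transpose_orders(order1, order2) == (std::vector<int64_t>{0, 1, 2, 3}));
    return 0;
}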

View File

@ -111,7 +111,7 @@ ov::pass::TransposeSinkingInterpolateBackward::TransposeSinkingInterpolateBackwa
// remove output transposes
RemoveSingleOutputConsumers(main_node);
SwapNames(main_node, transpose);
const auto transpose_axis_order = transpose_const->get_axis_vector_val();
const auto reversed_transpose_order = ReverseTransposeOrder(transpose_axis_order);
auto axis = std::make_shared<Constant>(element::i32, Shape{}, std::vector<int32_t>{0});

View File

@ -70,7 +70,6 @@ std::vector<size_t> get_updated_order_backward(const std::vector<size_t>& axes_v
bool get_keep_dims(const std::shared_ptr<Node>& reduction) {
auto arithmetic_reduce = std::dynamic_pointer_cast<ov::op::util::ArithmeticReductionKeepDims>(reduction);
auto logical_reduce = std::dynamic_pointer_cast<ov::op::util::LogicalReductionKeepDims>(reduction);
// auto squeeze = std::dynamic_pointer_cast<opset6::Squeeze>(reduction);
bool keep_dims = false; // squeeze always reduces number of output dimensions
if (logical_reduce)

View File

@ -1,163 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "transformations/common_optimizations/transpose_sinking_strided_slice.hpp"
#include <openvino/pass/pattern/op/or.hpp>
#include "itt.hpp"
#include "openvino/op/util/op_types.hpp"
#include "openvino/opsets/opset10.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/util/common_util.hpp"
#include "transformations/common_optimizations/transpose_sinking_utils.hpp"
#include "transformations/rt_info/transpose_sinking_attr.hpp"
using namespace ov;
using namespace ov::opset10;
using namespace ov::pass::pattern;
using namespace transpose_sinking;
ov::pass::TransposeSinkingStridedSliceForward::TransposeSinkingStridedSliceForward() {
MATCHER_SCOPE(TransposeSinkingStridedSliceForward);
auto const_label = wrap_type<Constant>();
auto transpose_label = wrap_type<Transpose>({any_input(), const_label});
auto main_node_label = wrap_type<StridedSlice>({transpose_label, any_input(), any_input(), any_input()});
matcher_pass_callback matcher_pass_callback = [=](Matcher& m) {
const auto& pattern_to_node = m.get_pattern_map();
auto& main_node = pattern_to_node.at(main_node_label);
auto transpose = std::dynamic_pointer_cast<Transpose>(pattern_to_node.at(transpose_label));
if (!transpose) {
return false;
}
auto transpose_const = as_type_ptr<Constant>(pattern_to_node.at(const_label));
if (!transpose_const) {
return false;
}
const auto& strided_slice = std::dynamic_pointer_cast<StridedSlice>(main_node);
if (!strided_slice) {
return false;
}
auto elipsis_mask = strided_slice->get_ellipsis_mask();
auto new_axis_mask = strided_slice->get_new_axis_mask();
auto shrink_mask = strided_slice->get_shrink_axis_mask();
if (!elipsis_mask.empty() || !new_axis_mask.empty() || !shrink_mask.empty()) {
// not supported yet
return false;
}
// remove Transpose on 1st input:
auto transpose_parent = main_node->input_value(0).get_node()->input_value(0);
main_node->input(0).replace_source_output(transpose_parent);
// change the order of values for PadBegin and PadEng inputs
const auto transpose_axis_order = transpose_const->get_axis_vector_val();
const auto reversed_transpose_order = ReverseTransposeOrder(transpose_axis_order);
auto axis = std::make_shared<Constant>(element::i32, Shape{}, std::vector<int32_t>{0});
main_node->input(1).replace_source_output(
ChangeValuesOrder(main_node->input_value(1), reversed_transpose_order, axis));
main_node->input(2).replace_source_output(
ChangeValuesOrder(main_node->input_value(2), reversed_transpose_order, axis));
main_node->input(3).replace_source_output(
ChangeValuesOrder(main_node->input_value(3), reversed_transpose_order, axis));
const auto& begin_mask = strided_slice->get_begin_mask();
const auto& end_mask = strided_slice->get_end_mask();
const auto& order_size = transpose_axis_order.size();
std::vector<int64_t> new_begin_mask(order_size), new_end_mask(order_size);
for (size_t i = 0; i < order_size; ++i) {
new_begin_mask[i] = begin_mask[transpose_axis_order[i]];
new_end_mask[i] = end_mask[transpose_axis_order[i]];
}
strided_slice->set_begin_mask(new_begin_mask);
strided_slice->set_begin_mask(new_end_mask);
main_node->validate_and_infer_types();
TransposeInputsInfo transpose_input_info = {transpose, transpose_const, 0};
for (auto& new_node : sink_forward::InsertOutputTransposes(main_node, transpose_input_info)) {
register_new_node(new_node);
transpose_sinking::UpdateForwardSinkingAbility(new_node);
}
return true;
};
auto m = std::make_shared<Matcher>(main_node_label, matcher_name);
register_matcher(m, matcher_pass_callback);
}
ov::pass::TransposeSinkingStridedSliceBackward::TransposeSinkingStridedSliceBackward() {
MATCHER_SCOPE(TransposeSinkingDataMovementBackward);
auto main_node_label = wrap_type<StridedSlice>([](const Output<Node>& output) -> bool {
return has_static_rank()(output) && HasSameOutputTransposeNodes(output);
});
auto transpose_const_label = wrap_type<Constant>();
auto transpose_label =
wrap_type<Transpose>({main_node_label, transpose_const_label}, [](const Output<Node>& output) -> bool {
return has_static_rank()(output) && is_sinking_node(output);
});
matcher_pass_callback matcher_pass_callback = [=](Matcher& m) {
const auto& pattern_to_output = m.get_pattern_value_map();
auto transpose_const = as_type_ptr<Constant>(pattern_to_output.at(transpose_const_label).get_node_shared_ptr());
auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr();
auto main_node = pattern_to_output.at(main_node_label).get_node_shared_ptr();
const auto& strided_slice = std::dynamic_pointer_cast<StridedSlice>(main_node);
if (!strided_slice) {
return false;
}
auto elipsis_mask = strided_slice->get_ellipsis_mask();
auto new_axis_mask = strided_slice->get_new_axis_mask();
auto shrink_mask = strided_slice->get_shrink_axis_mask();
if (!elipsis_mask.empty() || !new_axis_mask.empty() || !shrink_mask.empty()) {
// not supported yet
return false;
}
for (auto& new_node : sink_backward::InsertTransposeBeforeNode(main_node,
transpose_const,
/* input_indexes= */ {0})) {
register_new_node(new_node);
}
// remove output transposes
RemoveSingleOutputConsumers(main_node);
const auto transpose_axis_order = transpose_const->get_axis_vector_val();
auto axis = std::make_shared<Constant>(element::i32, Shape{}, std::vector<int32_t>{0});
main_node->input(1).replace_source_output(
ChangeValuesOrder(main_node->input_value(1), transpose_axis_order, axis));
main_node->input(2).replace_source_output(
ChangeValuesOrder(main_node->input_value(2), transpose_axis_order, axis));
main_node->input(3).replace_source_output(
ChangeValuesOrder(main_node->input_value(3), transpose_axis_order, axis));
const auto reversed_transpose_order = ReverseTransposeOrder(transpose_axis_order);
const auto& begin_mask = strided_slice->get_begin_mask();
const auto& end_mask = strided_slice->get_end_mask();
const auto& order_size = reversed_transpose_order.size();
std::vector<int64_t> new_begin_mask(order_size), new_end_mask(order_size);
for (size_t i = 0; i < order_size; ++i) {
new_begin_mask[i] = begin_mask[reversed_transpose_order[i]];
new_end_mask[i] = end_mask[reversed_transpose_order[i]];
}
strided_slice->set_begin_mask(new_begin_mask);
strided_slice->set_begin_mask(new_end_mask);
main_node->validate_and_infer_types();
return true;
};
auto m = std::make_shared<Matcher>(transpose_label, matcher_name);
register_matcher(m, matcher_pass_callback);
}
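
The permutation logic in the (now deleted) prototype above reduces to a single gather over the per-axis vectors: when a Transpose is moved across the StridedSlice, its begin, end and stride inputs and the begin/end masks must be reordered so that position i refers to the axis that now sits at position i. A minimal standalone sketch, with a hypothetical helper name:

#include <cstdint>
#include <vector>

// Plain-index equivalent of ChangeValuesOrder plus the mask loops above:
// entry i of the reordered vector takes the value of axis order[i].
template <typename T>
std::vector<T> permute_per_axis_values(const std::vector<T>& values,
                                       const std::vector<size_t>& order) {
    std::vector<T> permuted(values.size());
    for (size_t i = 0; i < order.size(); ++i) {
        permuted[i] = values[order[i]];
    }
    return permuted;
}

// Example: order {0, 2, 3, 1} applied to begin {10, 20, 30, 40}
// yields {10, 30, 40, 20}.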

View File

@ -111,6 +111,7 @@ ov::pass::TransposeSinkingUnaryBackward::TransposeSinkingUnaryBackward() {
unary->validate_and_infer_types();
// remove output transposes
RemoveSingleOutputConsumers(unary);
SwapNames(transpose, unary);
return true;
};

View File

@ -14,16 +14,12 @@
#include "transpose_sinking_test_utils.hpp"
using namespace ov;
using namespace ov::opset9;
using namespace ov::opset10;
using namespace transpose_sinking::testing;
namespace transpose_sinking_binary_eltwise {
namespace {
using NodePtr = std::shared_ptr<ov::Node>;
using ModelPtr = std::shared_ptr<Model>;
using Output = ov::Output<ov::Node>;
namespace {
std::string to_string(const Shape& shape) {
std::ostringstream result;

View File

@ -13,11 +13,16 @@
#include "transformations/common_optimizations/transpose_sinking_reduction.hpp"
#include "transformations/common_optimizations/transpose_sinking_split.hpp"
#include "transformations/common_optimizations/transpose_sinking_unary.hpp"
#include "transformations/common_optimizations/transpose_sinking_interpolate.hpp"
#include "transpose_sinking_test_utils.hpp"
using namespace std;
using namespace ov;
using namespace ov::opset10;
using namespace transpose_sinking::testing;
namespace transpose_sinking {
namespace common {
template <typename UnaryT>
class UnaryFactory : public IFactory {
@ -138,7 +143,7 @@ class ReductionFactory : public IFactory {
public:
explicit ReductionFactory(const std::string& type_name) : IFactory(type_name) {}
NodePtr create(const OutputVector& parent_nodes) const override {
return std::make_shared<ReductionT>(parent_nodes[0], parent_nodes[1]);
return std::make_shared<ReductionT>(parent_nodes[0], parent_nodes[1], true);
}
};
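
The third constructor argument (true) added above is the keep_dims flag. A minimal sketch of its effect, written outside the test fixture with opset10 directly (hypothetical function name):

#include <memory>
#include <vector>
#include <openvino/opsets/opset10.hpp>

// keep_dims = true keeps the reduced axes in the output shape as 1s,
// which is what the reference models in these tests now expect.
std::shared_ptr<ov::Node> make_reduce_max_keep_dims() {
    using namespace ov::opset10;
    auto data = std::make_shared<Parameter>(ov::element::f32, ov::Shape{32, 4, 2, 1});
    auto axes = Constant::create(ov::element::i32, ov::Shape{2}, std::vector<int32_t>{1, 3});
    // keep_dims = true  -> output shape {32, 1, 2, 1}
    // keep_dims = false -> output shape {32, 2}
    return std::make_shared<ReduceMax>(data, axes, /*keep_dims=*/true);
}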
@ -146,6 +151,34 @@ template <typename ReductionT>
FactoryPtr CreateReductionFactory(const std::string& type_name) {
return std::make_shared<ReductionFactory<ReductionT>>(type_name);
}
class InterpolateFactory : public IFactory {
public:
explicit InterpolateFactory(const std::string& type_name, bool is_reference) : IFactory(type_name), m_is_reference(is_reference) {}
NodePtr create(const OutputVector& parent_nodes) const override {
std::vector<size_t> pads_begin{1, 2, 3, 4};
std::vector<size_t> pads_end{1, 2, 3, 4};
if (m_is_reference) {
pads_begin = {4, 3, 2, 1};
pads_end = {4, 3, 2, 1};
}
const Interpolate::InterpolateAttrs attrs{Interpolate::InterpolateMode::NEAREST,
Interpolate::ShapeCalcMode::SCALES,
pads_begin,
pads_end,
Interpolate::CoordinateTransformMode::HALF_PIXEL,
Interpolate::NearestMode::ROUND_PREFER_FLOOR,
false,
-0.75};
return std::make_shared<Interpolate>(parent_nodes[0], parent_nodes[1], parent_nodes[2], parent_nodes[3], attrs);
}
private:
bool m_is_reference = false;
};
FactoryPtr CreateInterpolateFactory(const std::string& type_name, bool is_reference) {
return std::make_shared<InterpolateFactory>(type_name, is_reference);
}
// ----------------------------------------------------------------------------
#undef CREATE_UNARY_FACTORY
@ -174,6 +207,9 @@ FactoryPtr CreateReductionFactory(const std::string& type_name) {
#undef CREATE_REDUCTION_FACTORY
#define CREATE_REDUCTION_FACTORY(type_name) CreateReductionFactory<type_name>(#type_name)
#undef CREATE_INTERPOLATE_FACTORY
#define CREATE_INTERPOLATE_FACTORY(type_name, reference_flag) CreateInterpolateFactory(#type_name, reference_flag)
// ----------------------------------------------------------------------------
struct Preprocessing {
@ -226,7 +262,7 @@ struct TestCase {
class TransposeSinkingTestFixture : public ::testing::WithParamInterface<TestParams>, public TransformationTestsF {
public:
static string get_test_name(testing::TestParamInfo<TestParams> obj) {
static string get_test_name(const ::testing::TestParamInfo<TestParams>& obj) {
size_t num_main_ops_idx;
size_t main_op_idx;
TestCase test_case;
@ -241,16 +277,22 @@ public:
};
vector<FactoryPtr> unary_factories = {
CREATE_UNARY_FACTORY(Clamp), CREATE_UNARY_FACTORY(Elu), CREATE_UNARY_FACTORY(SoftPlus),
CREATE_UNARY_FACTORY(LogicalNot), CREATE_UNARY_FACTORY(Convert), CREATE_UNARY_FACTORY(Abs),
CREATE_UNARY_FACTORY(Acos), CREATE_UNARY_FACTORY(Asin), CREATE_UNARY_FACTORY(Asinh),
CREATE_UNARY_FACTORY(Atan), CREATE_UNARY_FACTORY(Ceiling), CREATE_UNARY_FACTORY(Cos),
CREATE_UNARY_FACTORY(Cosh), CREATE_UNARY_FACTORY(Erf), CREATE_UNARY_FACTORY(Exp),
CREATE_UNARY_FACTORY(Gelu), CREATE_UNARY_FACTORY(HSigmoid), CREATE_UNARY_FACTORY(HSwish),
CREATE_UNARY_FACTORY(Log), CREATE_UNARY_FACTORY(Negative), CREATE_UNARY_FACTORY(Relu),
CREATE_UNARY_FACTORY(Sigmoid), CREATE_UNARY_FACTORY(Sign), CREATE_UNARY_FACTORY(Sin),
CREATE_UNARY_FACTORY(Sinh), CREATE_UNARY_FACTORY(SoftSign), CREATE_UNARY_FACTORY(Sqrt),
CREATE_UNARY_FACTORY(Tan), CREATE_UNARY_FACTORY(Tanh)};
CREATE_UNARY_FACTORY(Abs), CREATE_UNARY_FACTORY(Acos), CREATE_UNARY_FACTORY(Acosh),
CREATE_UNARY_FACTORY(Asin), CREATE_UNARY_FACTORY(Asinh), CREATE_UNARY_FACTORY(Atan),
CREATE_UNARY_FACTORY(Atanh), CREATE_UNARY_FACTORY(Ceiling), CREATE_UNARY_FACTORY(Clamp),
CREATE_UNARY_FACTORY(Cos), CREATE_UNARY_FACTORY(Cosh), CREATE_UNARY_FACTORY(Convert),
CREATE_UNARY_FACTORY(Erf), CREATE_UNARY_FACTORY(Elu), CREATE_UNARY_FACTORY(Exp),
CREATE_UNARY_FACTORY(Floor), CREATE_UNARY_FACTORY(Gelu), CREATE_UNARY_FACTORY(HSigmoid),
CREATE_UNARY_FACTORY(HSwish), CREATE_UNARY_FACTORY(Log), CREATE_UNARY_FACTORY(LogicalNot),
CREATE_UNARY_FACTORY(Mish), CREATE_UNARY_FACTORY(Negative), CREATE_UNARY_FACTORY(Relu),
CREATE_UNARY_FACTORY(Sigmoid), CREATE_UNARY_FACTORY(Sign), CREATE_UNARY_FACTORY(Sin),
CREATE_UNARY_FACTORY(Sinh), CREATE_UNARY_FACTORY(SoftPlus), CREATE_UNARY_FACTORY(SoftSign),
CREATE_UNARY_FACTORY(Sqrt), CREATE_UNARY_FACTORY(Tan), CREATE_UNARY_FACTORY(Tanh)};
vector<FactoryPtr> logical_unary_factories = {
CREATE_UNARY_FACTORY(IsFinite),
CREATE_UNARY_FACTORY(IsInf),
CREATE_UNARY_FACTORY(IsNaN)};
std::vector<FactoryPtr> binary_factories = {CREATE_BINARY_FACTORY(Add),
CREATE_BINARY_FACTORY(Divide),
@ -264,17 +306,13 @@ std::vector<FactoryPtr> binary_factories = {CREATE_BINARY_FACTORY(Add),
CREATE_BINARY_FACTORY(PRelu)};
std::vector<FactoryPtr> reduction_factories = {
CREATE_BINARY_FACTORY(ReduceMax),
CREATE_BINARY_FACTORY(ReduceMin),
CREATE_BINARY_FACTORY(ReduceMean),
CREATE_BINARY_FACTORY(ReduceSum),
CREATE_BINARY_FACTORY(ReduceProd),
// CREATE_BINARY_FACTORY(ReduceLogicalOr),
// CREATE_BINARY_FACTORY(ReduceLogicalAnd),
CREATE_BINARY_FACTORY(ReduceL1),
CREATE_BINARY_FACTORY(ReduceL2),
// CREATE_BINARY_FACTORY(Squeeze),
// CREATE_BINARY_FACTORY(Unsqueeze),
CREATE_REDUCTION_FACTORY(ReduceMax),
CREATE_REDUCTION_FACTORY(ReduceMin),
CREATE_REDUCTION_FACTORY(ReduceMean),
CREATE_REDUCTION_FACTORY(ReduceSum),
CREATE_REDUCTION_FACTORY(ReduceProd),
CREATE_REDUCTION_FACTORY(ReduceL1),
CREATE_REDUCTION_FACTORY(ReduceL2),
};
TEST_P(TransposeSinkingTestFixture, CompareFunctions) {
@ -292,15 +330,13 @@ TEST_P(TransposeSinkingTestFixture, CompareFunctions) {
test_case.num_main_ops[num_main_ops_idx],
test_case.inputs_to_main);
test_case.transformation->registerPass(manager);
// TODO: enable accuracy testing. The current issues: div by 0
// comparator.enable(FunctionsComparator::CmpValues::ACCURACY);
if (test_case.model.main_op[0]->getTypeName() == "Split") {
disable_result_friendly_names_check();
}
comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES);
comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES);
}
namespace transpose_sinking {
namespace common {
shared_ptr<ov::Model> create_model(size_t main_node_idx,
const ModelDescription& model_desc,
size_t num_ops,
@ -310,8 +346,6 @@ shared_ptr<ov::Model> create_model(size_t main_node_idx,
auto outputs = model_desc.preprocess_outputs_of_main.apply(main_node->outputs());
return make_shared<ov::Model>(outputs, filter_parameters(inputs_to_main));
}
} // namespace common
} // namespace transpose_sinking
auto wrapper = [](const TestCase& test_case) {
OPENVINO_ASSERT(test_case.model.main_op.size() == test_case.model_ref.main_op.size(),
@ -322,30 +356,31 @@ auto wrapper = [](const TestCase& test_case) {
::testing::Values(test_case));
};
auto test_forward_unary = []() {
auto test_forward_unary = [](const vector<FactoryPtr>& factories, const vector<size_t>& num_main_ops) {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingUnaryForward);
test_case.num_main_ops = {1, 10};
test_case.num_main_ops = num_main_ops;
test_case.inputs_to_main = {
parameter(element::f32, {1, 96, 55, 55}),
};
// Test model description:
test_case.model.preprocess_inputs_to_main = {{set_transpose_for}, {{0}}};
test_case.model.main_op = unary_factories;
test_case.model.main_op = factories;
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.main_op = unary_factories;
test_case.model_ref.main_op = factories;
test_case.model_ref.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonUnaryForward, TransposeSinkingTestFixture, test_forward_unary());
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonUnaryForward, TransposeSinkingTestFixture, test_forward_unary(unary_factories, {1, 10}));
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonLogicalUnaryForward, TransposeSinkingTestFixture, test_forward_unary(logical_unary_factories, {1}));
auto test_forward_binary = []() {
TestCase test_case;
@ -410,7 +445,7 @@ auto test_forward_split = []() {
test_case.num_main_ops = {1, 2};
test_case.inputs_to_main = {
parameter(element::f32, {1, 9, 55, 55}),
constant(element::i32, {}, {2}),
constant<int64_t>(element::i32, {}, {2}),
};
// Test model description:
@ -444,8 +479,8 @@ auto test_forward_pad = []() {
test_case.num_main_ops = {1, 2};
test_case.inputs_to_main = {
parameter(element::f32, {1, 3, 55, 55}),
constant(element::i32, {4}, {1, 2, 3, 4}),
constant(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
};
// Test model description:
@ -472,9 +507,9 @@ auto test_forward_batch_to_space = []() {
test_case.num_main_ops = {1, 2};
test_case.inputs_to_main = {
parameter(element::f32, {128, 55, 3, 128}),
constant(element::i32, {4}, {1, 2, 2, 2}),
constant(element::i32, {4}, {1, 2, 2, 2}),
constant(element::i32, {4}, {1, 2, 2, 2}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
};
// Test model description:
@ -503,9 +538,9 @@ auto test_forward_space_to_batch = []() {
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {64, 9, 8, 1}),
constant(element::i32, {4}, {1, 2, 3, 4}),
constant(element::i32, {4}, {1, 2, 3, 4}),
constant(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
};
// Test model description:
@ -534,7 +569,7 @@ auto test_forward_reduction = []() {
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {32, 4, 2, 1}),
constant(element::i32, {2}, {1, 3}),
constant<int64_t>(element::i32, {2}, {1, 3}),
};
// Test model description:
@ -559,3 +594,322 @@ auto test_forward_reduction = []() {
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonReductionForward, TransposeSinkingTestFixture, test_forward_reduction());
auto test_forward_interpolate = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingInterpolateForward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {1, 2, 48, 80}),
constant<int64_t>(element::i32, {2}, {24, 160}),
constant<float>(element::f32, {2}, {0.5, 2.}),
constant<int64_t>(element::i32, {2}, {1, 2}),
};
// Test model description:
test_case.model.preprocess_inputs_to_main = {{set_transpose_for}, {{0}}};
test_case.model.main_op = {CREATE_INTERPOLATE_FACTORY(Interpolate, false)};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
auto set_specific_gather_for = [](const vector<size_t>& idxs, const OutputVector& out_vec) -> OutputVector {
OutputVector result = out_vec;
for (const auto& idx : idxs) {
const auto& out = out_vec[idx];
vector<int64_t> transpose_order(out_vec[0].get_shape().size());
iota(transpose_order.begin(), transpose_order.end(), 0);
reverse(transpose_order.begin(), transpose_order.end());
auto data = make_shared<Constant>(element::i32, Shape{transpose_order.size()}, transpose_order);
auto axis = make_shared<Constant>(element::i32, Shape{}, 0);
auto transpose = make_shared<Gather>(data, out, axis);
result[idx] = transpose;
}
return result;
};
test_case.model_ref.preprocess_inputs_to_main = {{set_specific_gather_for}, {{3}}};
test_case.model_ref.main_op = {CREATE_INTERPOLATE_FACTORY(Interpolate, true)};
test_case.model_ref.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonInterpolateForward, TransposeSinkingTestFixture, test_forward_interpolate());
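
The set_specific_gather_for lambda above remaps the Interpolate axes input through a Gather over the reversed transpose order. Worked out for this test case (plain arithmetic, hypothetical helper name): with a 4D input the reversed order is {3, 2, 1, 0}, so gathering it with the original axes {1, 2} gives {2, 1}, the positions those spatial axes occupy once the layout is reversed.

#include <cstdint>
#include <vector>

// Mirrors Gather(transpose_order, axes, axis = 0) from the lambda above.
std::vector<int64_t> remap_axes(const std::vector<int64_t>& order,
                                const std::vector<int64_t>& axes) {
    std::vector<int64_t> remapped(axes.size());
    for (size_t i = 0; i < axes.size(); ++i) {
        remapped[i] = order[axes[i]];
    }
    return remapped;
}

// remap_axes({3, 2, 1, 0}, {1, 2}) == {2, 1}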
// ------------------ BACKWARD --------------------
auto test_backward_unary = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingUnaryBackward);
test_case.num_main_ops = {1, 10};
test_case.inputs_to_main = {
parameter(element::f32, {1, 96, 55, 55}),
};
// Test model description:
test_case.model.main_op = unary_factories;
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for}, {{0}}};
test_case.model_ref.main_op = unary_factories;
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonUnaryBackward, TransposeSinkingTestFixture, test_backward_unary());
auto test_backward_binary = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingBinaryBackward);
test_case.num_main_ops = {1, 10};
test_case.inputs_to_main = {
parameter(element::f32, {1, 96, 55, 55}),
parameter(element::f32, {1, 96, 55, 55}),
};
// Test model description:
test_case.model.main_op = binary_factories;
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for}, {{0, 1}}};
test_case.model_ref.main_op = binary_factories;
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonBinaryBackward, TransposeSinkingTestFixture, test_backward_binary());
auto test_backward_concat = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingConcatBackward);
test_case.num_main_ops = {1, 3};
test_case.inputs_to_main = {
parameter(element::f32, {1, 96, 55, 55}),
parameter(element::f32, {1, 96, 55, 55}),
parameter(element::f32, {1, 96, 55, 55}),
};
// Test model description:
test_case.model.main_op = {CREATE_CONCAT_FACTORY(Concat)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for}, {{0, 1, 2}}};
test_case.model_ref.main_op = {CREATE_CONCAT_REF_FACTORY(Concat)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonConcatBackward, TransposeSinkingTestFixture, test_backward_concat());
auto test_backward_split = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingSplitBackward);
test_case.num_main_ops = {1, 2};
test_case.inputs_to_main = {
parameter(element::f32, {1, 9, 55, 55}),
constant<int64_t>(element::i32, {}, {1}),
};
// Test model description:
test_case.model.main_op = {CREATE_SPLIT_FACTORY(Split)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0, 1, 2}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
auto new_constant = [](const vector<size_t>& idxs, const OutputVector& out_vec) -> OutputVector {
OutputVector new_out_vec(out_vec.size());
new_out_vec[0] = out_vec[0];
new_out_vec[1] =
make_shared<Constant>(out_vec[1].get_element_type(), out_vec[1].get_shape(), std::vector<int64_t>{2});
return new_out_vec;
};
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, new_constant}, {{0}, {1}}};
test_case.model_ref.main_op = {CREATE_SPLIT_FACTORY(Split)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonSplitBackward, TransposeSinkingTestFixture, test_backward_split());
auto test_backward_pad = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingDataMovementBackward);
test_case.num_main_ops = {1, 2};
test_case.inputs_to_main = {
parameter(element::f32, {1, 3, 55, 55}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
};
// Test model description:
test_case.model.main_op = {CREATE_PAD_FACTORY(Pad)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_gather_for}, {{0}, {1, 2}}};
test_case.model_ref.main_op = {CREATE_PAD_FACTORY(Pad)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonPadBackward, TransposeSinkingTestFixture, test_backward_pad());
auto test_backward_batch_to_space = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingDataMovementBackward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {128, 55, 3, 128}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
constant<int64_t>(element::i32, {4}, {1, 2, 2, 2}),
};
// Reference model description:
test_case.model.main_op = {CREATE_BATCH_TO_SPACE_FACTORY(BatchToSpace)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Test model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_gather_for}, {{0}, {1, 2, 3}}};
test_case.model_ref.main_op = {CREATE_BATCH_TO_SPACE_FACTORY(BatchToSpace)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonBatchToSpaceBackward,
TransposeSinkingTestFixture,
test_backward_batch_to_space());
auto test_backward_space_to_batch = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingDataMovementBackward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {1, 8, 9, 64}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
constant<int64_t>(element::i32, {4}, {1, 2, 3, 4}),
};
// Test model description:
test_case.model.main_op = {CREATE_SPACE_TO_BATCH_FACTORY(SpaceToBatch)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_gather_for}, {{0}, {1, 2, 3}}};
test_case.model_ref.main_op = {CREATE_SPACE_TO_BATCH_FACTORY(SpaceToBatch)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonSpaceToBatchBackward,
TransposeSinkingTestFixture,
test_backward_space_to_batch());
auto test_backward_reduction = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingReductionBackward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {32, 4, 2, 1}),
constant<int64_t>(element::i32, {2}, {1, 3}),
};
// Test model description:
test_case.model.main_op = reduction_factories;
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
auto new_constant = [](const vector<size_t>& idxs, const OutputVector& out_vec) -> OutputVector {
OutputVector new_out_vec(out_vec.size());
new_out_vec[0] = out_vec[0];
new_out_vec[1] =
make_shared<Constant>(out_vec[1].get_element_type(), out_vec[1].get_shape(), std::vector<int64_t>{2, 0});
return new_out_vec;
};
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, new_constant}, {{0}, {1}}};
test_case.model_ref.main_op = reduction_factories;
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonReductionBackward, TransposeSinkingTestFixture, test_backward_reduction());
auto test_backward_interpolate = []() {
TestCase test_case;
// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TransposeSinkingInterpolateBackward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, {1, 2, 48, 80}),
constant<int64_t>(element::i32, {2}, {24, 160}),
constant<float>(element::f32, {2}, {0.5, 2.}),
constant<int64_t>(element::i32, {2}, {1, 2}),
};
// Test model description:
test_case.model.main_op = {CREATE_INTERPOLATE_FACTORY(Interpolate, true)};
test_case.model.preprocess_outputs_of_main = {{set_transpose_for}, {{0}}};
test_case.model.model_template = transpose_sinking::common::create_model;
// Reference model description:
auto set_specific_gather_for = [](const vector<size_t>& idxs, const OutputVector& out_vec) -> OutputVector {
OutputVector result = out_vec;
for (const auto& idx : idxs) {
const auto& out = out_vec[idx];
vector<int64_t> transpose_order(out_vec[0].get_shape().size());
iota(transpose_order.begin(), transpose_order.end(), 0);
reverse(transpose_order.begin(), transpose_order.end());
auto data = make_shared<Constant>(element::i32, Shape{transpose_order.size()}, transpose_order);
auto axis = make_shared<Constant>(element::i32, Shape{}, 0);
auto transpose = make_shared<Gather>(data, out, axis);
result[idx] = transpose;
}
return result;
};
test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, set_specific_gather_for}, {{0}, {3}}};
test_case.model_ref.main_op = {CREATE_INTERPOLATE_FACTORY(Interpolate, false)};
test_case.model_ref.model_template = transpose_sinking::common::create_model;
return wrapper(test_case);
};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonInterpolateBackward, TransposeSinkingTestFixture, test_backward_interpolate());
}
}

View File

@ -16,10 +16,9 @@
using namespace ov;
using namespace ov::opset10;
using namespace transpose_sinking::testing;
namespace {
using NodePtr = std::shared_ptr<ov::Node>;
using ModelPtr = std::shared_ptr<Model>;
std::vector<size_t> concat_operations_numbers = {1, 10};

View File

@ -4,7 +4,7 @@
#include <functional>
#include <openvino/frontend/manager.hpp>
#include <openvino/opsets/opset9.hpp>
#include <openvino/opsets/opset10.hpp>
#include <openvino/pass/manager.hpp>
#include <transformations/common_optimizations/transpose_sinking_general.hpp>
#include <transformations/init_node_info.hpp>
@ -13,39 +13,43 @@
#include "gtest/gtest.h"
using namespace testing;
using namespace ov::opset10;
using NodePtr = std::shared_ptr<ov::Node>;
namespace transpose_sinking {
namespace testing {
namespace general {
TEST_F(TransformationTestsF, TransposeSinkingGeneralTestUnariesTransposesForward) {
ov::Shape input_shape = {1, 96, 55, 55};
ov::element::Type input_type = ov::element::f32;
size_t num_unary_ops = 10;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_unary_ops; ++i) {
auto ng_order0 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
auto unary = std::make_shared<ov::opset9::Tanh>(transpose0);
auto unary = std::make_shared<Tanh>(transpose0);
auto ng_order1 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<ov::opset9::Transpose>(unary, ng_order1);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<Transpose>(unary, ng_order1);
}
function = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_unary_ops; ++i) {
in_op = std::make_shared<ov::opset9::Tanh>(in_op);
in_op = std::make_shared<Tanh>(in_op);
}
function_ref = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
@ -60,35 +64,34 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestUnariesTransposesBackwar
size_t num_unary_ops = 10;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_unary_ops; ++i) {
auto ng_order0 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
auto unary = std::make_shared<ov::opset9::Tanh>(transpose0);
auto unary = std::make_shared<Tanh>(transpose0);
auto ng_order1 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<ov::opset9::Transpose>(unary, ng_order1);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<Transpose>(unary, ng_order1);
}
function = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_unary_ops; ++i) {
in_op = std::make_shared<ov::opset9::Tanh>(in_op);
in_op = std::make_shared<Tanh>(in_op);
}
function_ref = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
manager.register_pass<ov::pass::TransposeSinkingGeneralBackward>();
}
@ -98,37 +101,37 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestUnariesTransposesGeneral
size_t num_unary_ops = 10;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(X, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(X, ng_order0);
NodePtr in_op = transpose0;
for (size_t i = 0; i < num_unary_ops; ++i) {
auto ng_order0 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
auto unary = std::make_shared<ov::opset9::Tanh>(transpose0);
auto unary = std::make_shared<Tanh>(transpose0);
auto ng_order1 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<ov::opset9::Transpose>(unary, ng_order1);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
in_op = std::make_shared<Transpose>(unary, ng_order1);
}
function = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_unary_ops; ++i) {
in_op = std::make_shared<ov::opset9::Tanh>(in_op);
in_op = std::make_shared<Tanh>(in_op);
}
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
function_ref = std::make_shared<ov::Model>(transpose0, ov::ParameterVector{X});
}
@ -142,35 +145,35 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestBinaryGeneral) {
size_t num_binary_ops = 10;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(X, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(X, ng_order0);
NodePtr in_op = transpose0;
for (size_t i = 0; i < num_binary_ops; ++i) {
auto in_constant = std::make_shared<ov::opset9::Constant>(input_type, input_shape, ov::Shape{1});
auto in_constant = std::make_shared<Constant>(input_type, input_shape, ov::Shape{1});
auto ng_order1 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose1 = std::make_shared<ov::opset9::Transpose>(in_constant, ng_order1);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose1 = std::make_shared<Transpose>(in_constant, ng_order1);
in_op = std::make_shared<ov::opset9::Add>(in_op, transpose1);
in_op = std::make_shared<Add>(in_op, transpose1);
}
function = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_binary_ops; ++i) {
auto in_constant = std::make_shared<ov::opset9::Constant>(input_type, input_shape, ov::Shape{1});
in_op = std::make_shared<ov::opset9::Add>(in_op, in_constant);
auto in_constant = std::make_shared<Constant>(input_type, input_shape, ov::Shape{1});
in_op = std::make_shared<Add>(in_op, in_constant);
}
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
function_ref = std::make_shared<ov::Model>(transpose0, ov::ParameterVector{X});
}
@ -185,30 +188,30 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestConcatGeneral) {
const size_t num_concat_inputs = 2;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(X, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(X, ng_order0);
NodePtr in_op = transpose0;
for (size_t i = 0; i < num_concat_ops; ++i) {
ov::OutputVector concat_inputs;
concat_inputs.push_back(in_op);
for (size_t j = 1; j < num_concat_inputs; ++j) {
auto in_constant = std::make_shared<ov::opset9::Constant>(input_type, input_shape, ov::Shape{1});
auto in_constant = std::make_shared<Constant>(input_type, input_shape, ov::Shape{1});
auto ng_order1 =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose1 = std::make_shared<ov::opset9::Transpose>(in_constant, ng_order1);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose1 = std::make_shared<Transpose>(in_constant, ng_order1);
concat_inputs.push_back(transpose1);
}
in_op = std::make_shared<ov::opset9::Concat>(concat_inputs, 1);
in_op = std::make_shared<Concat>(concat_inputs, 1);
}
function = std::make_shared<ov::Model>(in_op, ov::ParameterVector{X});
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
NodePtr in_op = X;
for (size_t i = 0; i < num_concat_ops; ++i) {
@ -217,14 +220,14 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestConcatGeneral) {
concat_inputs.push_back(in_op);
for (size_t j = 1; j < num_concat_inputs; ++j) {
auto in_constant = std::make_shared<ov::opset9::Constant>(input_type, input_shape, ov::Shape{1});
auto in_constant = std::make_shared<Constant>(input_type, input_shape, ov::Shape{1});
concat_inputs.push_back(in_constant);
}
in_op = std::make_shared<ov::opset9::Concat>(concat_inputs, 2);
in_op = std::make_shared<Concat>(concat_inputs, 2);
}
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(in_op, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(in_op, ng_order0);
function_ref = std::make_shared<ov::Model>(transpose0, ov::ParameterVector{X});
}
@ -240,7 +243,7 @@ public:
virtual NodePtr create(const ov::OutputVector& parent) = 0;
virtual size_t getNumInputs() const = 0;
virtual size_t getNumOuputs() const = 0;
virtual size_t getNumOutputs() const = 0;
};
using FactoryPtr = std::shared_ptr<IFactory>;
@ -248,7 +251,7 @@ using FactoryPtr = std::shared_ptr<IFactory>;
class UnaryFactory : public IFactory {
public:
NodePtr create(const ov::OutputVector& parent) override {
return std::make_shared<ov::opset9::Sinh>(parent.front());
return std::make_shared<Sinh>(parent.front());
}
static FactoryPtr createFactory() {
@ -258,7 +261,7 @@ public:
size_t getNumInputs() const override {
return 1;
}
size_t getNumOuputs() const override {
size_t getNumOutputs() const override {
return 1;
}
};
@ -266,7 +269,7 @@ public:
class BinaryFactory : public IFactory {
public:
NodePtr create(const ov::OutputVector& parent) override {
return std::make_shared<ov::opset9::Add>(parent[0], parent[1]);
return std::make_shared<Add>(parent[0], parent[1]);
}
static FactoryPtr createFactory() {
@ -276,17 +279,17 @@ public:
size_t getNumInputs() const override {
return 2;
}
size_t getNumOuputs() const override {
size_t getNumOutputs() const override {
return 1;
}
};
class SplitFactory : public IFactory {
public:
SplitFactory(size_t axis) : axis_(axis) {}
explicit SplitFactory(size_t axis) : axis_(axis) {}
NodePtr create(const ov::OutputVector& parent) override {
auto split_axis_const = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{}, axis_);
return std::make_shared<ov::opset9::Split>(parent.front(), split_axis_const, 2);
auto split_axis_const = std::make_shared<Constant>(ov::element::u64, ov::Shape{}, axis_);
return std::make_shared<Split>(parent.front(), split_axis_const, 2);
}
static FactoryPtr createFactory(size_t axis) {
@ -296,7 +299,7 @@ public:
size_t getNumInputs() const override {
return 1;
}
size_t getNumOuputs() const override {
size_t getNumOutputs() const override {
return 2;
}
@ -306,9 +309,9 @@ private:
class ConcatFactory : public IFactory {
public:
ConcatFactory(size_t axis) : axis_(axis) {}
explicit ConcatFactory(size_t axis) : axis_(axis) {}
NodePtr create(const ov::OutputVector& parent) override {
return std::make_shared<ov::opset9::Concat>(parent, axis_);
return std::make_shared<Concat>(parent, axis_);
}
static FactoryPtr createFactory(size_t axis) {
@ -318,7 +321,7 @@ public:
size_t getNumInputs() const override {
return 2;
}
size_t getNumOuputs() const override {
size_t getNumOutputs() const override {
return 1;
}
@ -330,8 +333,8 @@ private:
Each node pair should start with a node whose input size is 1 and finish with a node whose output size is 1.
Insert Split/Concat to fulfill that.
*/
NodePtr CreateNodePair(FactoryPtr factory_first,
FactoryPtr factory_second,
NodePtr CreateNodePair(const FactoryPtr& factory_first,
const FactoryPtr& factory_second,
NodePtr parent,
size_t split_axis,
size_t concat_axis) {
@ -341,9 +344,9 @@ NodePtr CreateNodePair(FactoryPtr factory_first,
}
input = factory_first->create(input->outputs());
if (factory_first->getNumOuputs() < factory_second->getNumInputs()) {
if (factory_first->getNumOutputs() < factory_second->getNumInputs()) {
input = SplitFactory(split_axis).create(input->outputs());
} else if (factory_first->getNumOuputs() > factory_second->getNumInputs()) {
} else if (factory_first->getNumOutputs() > factory_second->getNumInputs()) {
input = ConcatFactory(concat_axis).create(input->outputs());
}
@ -370,23 +373,24 @@ NodePtr MakeAllNodesSubgraph(NodePtr parent, size_t split_axis, size_t concat_ax
}
TEST_F(TransformationTestsF, TransposeSinkingGeneralTestMultipleTypes) {
using namespace transpose_sinking::testing::general;
ov::Shape input_shape = {1, 96, 40, 55};
ov::element::Type input_type = ov::element::f32;
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
auto node0 = MakeAllNodesSubgraph(X, 1, 1);
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(node0, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(node0, ng_order0);
auto reshape_const =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{1, 40, 55, 96});
auto reshape = std::make_shared<ov::opset9::Reshape>(transpose0, reshape_const, false);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{1, 40, 55, 96});
auto reshape = std::make_shared<Reshape>(transpose0, reshape_const, false);
auto ng_order1 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
auto transpose1 = std::make_shared<ov::opset9::Transpose>(reshape, ng_order1);
auto ng_order1 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
auto transpose1 = std::make_shared<Transpose>(reshape, ng_order1);
auto node1 = MakeAllNodesSubgraph(transpose1, 1, 1);
@ -394,24 +398,28 @@ TEST_F(TransformationTestsF, TransposeSinkingGeneralTestMultipleTypes) {
}
{
auto X = std::make_shared<ov::opset9::Parameter>(input_type, input_shape);
auto X = std::make_shared<Parameter>(input_type, input_shape);
auto ng_order0 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<ov::opset9::Transpose>(X, ng_order0);
auto ng_order0 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 2, 3, 1});
auto transpose0 = std::make_shared<Transpose>(X, ng_order0);
auto node0 = MakeAllNodesSubgraph(transpose0, 3, 3);
auto reshape_const =
std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{1, 40, 55, 96});
auto reshape = std::make_shared<ov::opset9::Reshape>(node0, reshape_const, false);
std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{1, 40, 55, 96});
auto reshape = std::make_shared<Reshape>(node0, reshape_const, false);
auto node1 = MakeAllNodesSubgraph(reshape, 3, 3);
auto ng_order1 = std::make_shared<ov::opset9::Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
auto transpose1 = std::make_shared<ov::opset9::Transpose>(node1, ng_order1);
auto ng_order1 = std::make_shared<Constant>(ov::element::u64, ov::Shape{4}, ov::Shape{0, 3, 1, 2});
auto transpose1 = std::make_shared<Transpose>(node1, ng_order1);
function_ref = std::make_shared<ov::Model>(transpose1, ov::ParameterVector{X});
}
manager.register_pass<ov::pass::TransposeSinkingGeneral>();
}
}
}
}

View File

@ -1,284 +0,0 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <functional>
#include <openvino/frontend/manager.hpp>
#include <openvino/opsets/opset10.hpp>
#include <openvino/pass/constant_folding.hpp>
#include <openvino/pass/manager.hpp>
#include <transformations/common_optimizations/transpose_sinking_data_movement.hpp>
#include <transformations/common_optimizations/transpose_sinking_utils.hpp>
#include <transformations/init_node_info.hpp>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gtest/gtest.h"
#include "transpose_sinking_test_utils.hpp"
using namespace std;
using namespace ov;
using namespace opset10;
namespace transpose_sinking_pad {
namespace {
using NodePtr = shared_ptr<Node>;
using ModelPtr = shared_ptr<Model>;
vector<int64_t> TransposePadValues(const vector<int64_t>& pads, const vector<size_t>& order) {
vector<int64_t> new_pads(pads.size());
for (size_t i = 0; i < pads.size(); ++i) {
new_pads[i] = pads[order[i]];
}
return new_pads;
};
} // namespace
namespace forward {
namespace single_consumer {
shared_ptr<Model> CreateFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
auto order = make_shared<Constant>(element::i64, Shape{4}, Shape{0, 3, 1, 2});
auto transpose = make_shared<Transpose>(X, order); // 96 55 32 55
OutputVector outputs;
Output<Node> in_op = transpose->output(0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{95, 54, 31, 53});
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{95, 54, 31, 53});
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::REFLECT);
outputs.push_back((pad->output(0)));
in_op = pad;
}
outputs.push_back(in_op);
return make_shared<Model>(outputs, ParameterVector{X});
}
shared_ptr<Model> CreateReferenceFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
OutputVector outputs;
Output<Node> in_op = X->output(0);
vector<int64_t> pads{95, 54, 31, 53};
auto transpose_pad_values = [&](const vector<size_t>& order) {
vector<int64_t> new_pads(pads.size());
for (size_t i = 0; i < pads.size(); ++i) {
new_pads[i] = pads[order[i]];
}
return new_pads;
};
auto axis = make_shared<Constant>(element::i64, Shape{}, 0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
vector<size_t> order_val = {0, 3, 1, 2};
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, transpose_pad_values({0, 2, 3, 1}));
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, transpose_pad_values({0, 2, 3, 1}));
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::CONSTANT);
auto order = make_shared<Constant>(element::i64, Shape{4}, Shape{order_val});
auto transpose = make_shared<Transpose>(pad->output(0), order);
outputs.push_back(transpose);
in_op = pad;
}
auto order = make_shared<Constant>(element::i64, Shape{4}, order_val);
auto transpose = make_shared<Transpose>(in_op, order);
outputs.push_back(transpose);
auto ref = make_shared<Model>(outputs, ParameterVector{X});
ov::pass::Manager ps_manager;
ps_manager.run_passes(ref);
return ref;
}
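// Sketch of what this forward reference encodes (assuming the pass behaves as the tests expect):
// the Transpose that used to feed the Pad chain is removed from the input, each Pad keeps working
// in the original layout with its begin/end vectors permuted by the inverse order
// ({0, 2, 3, 1} is the inverse of {0, 3, 1, 2}), and a Transpose with the original order
// is re-created on every Pad output that is exposed as a model output.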
} // namespace single_consumer
} // namespace forward
namespace backward {
namespace single_consumer {
shared_ptr<Model> CreateFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
OutputVector outputs;
Output<Node> in_op = X->output(0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 1, 2, 3});
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 1, 2, 3});
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::CONSTANT);
in_op = pad;
}
auto order = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 2});
auto transpose = make_shared<Transpose>(in_op, order);
auto relu = make_shared<Relu>(transpose);
outputs.push_back(relu);
return make_shared<Model>(outputs, ParameterVector{X});
}
shared_ptr<Model> CreateReferenceFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
vector<size_t> order_val = {0, 3, 1, 2};
auto order = make_shared<Constant>(element::i64, Shape{4}, order_val);
auto transpose = make_shared<Transpose>(X, order);
OutputVector outputs;
Output<Node> in_op = transpose->output(0);
vector<int64_t> pads{0, 1, 2, 3};
auto axis = make_shared<Constant>(element::i64, Shape{}, 0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, TransposePadValues(pads, order_val));
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, TransposePadValues(pads, order_val));
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::CONSTANT);
in_op = pad;
}
auto relu = make_shared<Relu>(in_op);
outputs.push_back(relu);
auto ref = make_shared<Model>(outputs, ParameterVector{X});
return ref;
}
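// Sketch of the backward case (again, as encoded by these tests): the Transpose that used to sit
// after the Pad chain is moved in front of it, and every pads_begin/pads_end vector is permuted
// with the same order, e.g. pads {0, 1, 2, 3} with order {0, 3, 1, 2} become {0, 3, 1, 2};
// the Relu then consumes the last Pad directly.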
} // namespace single_consumer
namespace output_transpose_mult_transposes {
shared_ptr<Model> CreateFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
OutputVector outputs;
Output<Node> in_op = X->output(0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 1, 2, 3});
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 1, 2, 3});
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::CONSTANT);
in_op = pad;
}
auto order = make_shared<Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 2});
auto transpose_1 = make_shared<Transpose>(in_op, order);
auto relu_1 = make_shared<Relu>(transpose_1);
outputs.push_back(relu_1);
auto transpose_2 = make_shared<Transpose>(in_op, order);
auto relu_2 = make_shared<Relu>(transpose_2);
outputs.push_back(relu_2);
return make_shared<Model>(outputs, ParameterVector{X});
}
shared_ptr<Model> CreateReferenceFunction(size_t num_pad_ops, element::Type input_type) {
const Shape input_shape{96, 32, 55, 55};
auto X = make_shared<Parameter>(input_type, input_shape);
vector<size_t> order_val = {0, 3, 1, 2};
auto order = make_shared<Constant>(element::i64, Shape{4}, order_val);
auto transpose = make_shared<Transpose>(X, order);
OutputVector outputs;
Output<Node> in_op = transpose->output(0);
vector<int64_t> pads{0, 1, 2, 3};
auto axis = make_shared<Constant>(element::i64, Shape{}, 0);
auto pad_value = make_shared<Constant>(input_type, Shape{}, 0);
for (size_t i = 0; i < num_pad_ops; ++i) {
auto pad_begin_const = make_shared<Constant>(element::i64, Shape{4}, TransposePadValues(pads, order_val));
auto pad_end_const = make_shared<Constant>(element::i64, Shape{4}, TransposePadValues(pads, order_val));
auto pad = make_shared<Pad>(in_op, pad_begin_const, pad_end_const, pad_value, ov::op::PadMode::CONSTANT);
in_op = pad;
}
auto relu_1 = make_shared<Relu>(in_op);
auto relu_2 = make_shared<Relu>(in_op);
outputs.push_back(relu_1);
outputs.push_back(relu_2);
auto ref = make_shared<Model>(outputs, ParameterVector{X});
return ref;
}
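// In this multi-consumer case both output Transposes share the same order, which is what makes
// collapsing them into a single Transpose at the model input possible: both Relu consumers then
// read the padded tensor directly.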
} // namespace output_transpose_mult_transposes
} // namespace backward
using CreateGraphPadF = function<shared_ptr<Model>(size_t num_pad_ops, element::Type input_type)>;
using TestPadParams = tuple<PassFactoryPtr,
size_t, /* num_pad_ops */
CreateGraphPadF, /* model_factory */
CreateGraphPadF, /* reference_model_factory */
element::Type> /* input type */;
class TransposeSinkingPadTestFixture : public ::testing::WithParamInterface<TestPadParams>,
public TransformationTestsF {
public:
static string get_test_name(const testing::TestParamInfo<TestPadParams>& obj) {
PassFactoryPtr pass_factory;
size_t num_pad_ops;
CreateGraphPadF model_factory;
CreateGraphPadF reference_model_factory;
element::Type input_type;
tie(pass_factory, num_pad_ops, model_factory, reference_model_factory, input_type) = obj.param;
ostringstream test_name;
test_name << "pass_factory=" << pass_factory->getTypeName() << "_";
test_name << "num_pad_ops=" << num_pad_ops << "_";
test_name << "input_type=" << input_type;
return test_name.str();
}
};
TEST_P(TransposeSinkingPadTestFixture, CompareFunctions) {
PassFactoryPtr pass_factory;
size_t num_pad_ops;
CreateGraphPadF model_factory;
CreateGraphPadF reference_model_factory;
element::Type input_type;
tie(pass_factory, num_pad_ops, model_factory, reference_model_factory, input_type) = this->GetParam();
model = model_factory(num_pad_ops, input_type);
model_ref = reference_model_factory(num_pad_ops, input_type);
pass_factory->registerPass(manager);
}
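// Note: the test body only builds model/model_ref and registers the pass under test;
// the pass is actually run on model, and the two functions are compared, by the
// TransformationTestsF base class in TearDown().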
std::vector<size_t> pad_operations_numbers = {1, 10};
INSTANTIATE_TEST_SUITE_P(TransposeSinkingPadForwardSingleConsumerTestSuite,
TransposeSinkingPadTestFixture,
::testing::Combine(::testing::Values(CREATE_PASS_FACTORY(TransposeSinkingDataMovementForward)),
::testing::ValuesIn(pad_operations_numbers),
::testing::Values(forward::single_consumer::CreateFunction),
::testing::Values(forward::single_consumer::CreateReferenceFunction),
::testing::Values(element::f32)),
TransposeSinkingPadTestFixture::get_test_name);
INSTANTIATE_TEST_SUITE_P(
TransposeSinkingPadBackwardSingleConsumerTestSuite,
TransposeSinkingPadTestFixture,
::testing::Combine(::testing::Values(CREATE_PASS_FACTORY(TransposeSinkingDataMovementBackward)),
::testing::ValuesIn(pad_operations_numbers),
::testing::Values(backward::single_consumer::CreateFunction),
::testing::Values(backward::single_consumer::CreateReferenceFunction),
::testing::Values(element::f32)),
TransposeSinkingPadTestFixture::get_test_name);
INSTANTIATE_TEST_SUITE_P(
TransposeSinkingPadBackwardSingleConsumerMultiTransposesTestSuite,
TransposeSinkingPadTestFixture,
::testing::Combine(::testing::Values(CREATE_PASS_FACTORY(TransposeSinkingDataMovementBackward)),
::testing::ValuesIn(pad_operations_numbers),
::testing::Values(backward::output_transpose_mult_transposes::CreateFunction),
::testing::Values(backward::output_transpose_mult_transposes::CreateReferenceFunction),
::testing::Values(element::f32)),
TransposeSinkingPadTestFixture::get_test_name);
} // namespace transpose_sinking_pad

View File

@ -16,14 +16,9 @@
using namespace ov;
using namespace ov::opset10;
namespace transpose_sinking_split {
namespace {
using NodePtr = std::shared_ptr<ov::Node>;
using ModelPtr = std::shared_ptr<Model>;
} // namespace
namespace transpose_sinking {
namespace testing {
namespace split {
std::vector<size_t> split_tree_depth_nums = {1, 3};
std::vector<size_t> split_operations_numbers = {1, 10};
@ -496,7 +491,7 @@ using TestSplitParams = std::tuple<PassFactoryPtr,
class TransposeSinkingSplitTestFixture : public ::testing::WithParamInterface<TestSplitParams>,
public TransformationTestsF {
public:
- static std::string get_test_name(const testing::TestParamInfo<TestSplitParams>& obj) {
+ static std::string get_test_name(const ::testing::TestParamInfo<TestSplitParams>& obj) {
PassFactoryPtr pass_factory;
size_t num_split_ops;
size_t num_split_outputs;
@ -773,7 +768,7 @@ class TransposeSinkingSplitBackwardRestrictTestFixture
: public ::testing::WithParamInterface<TestSplitBackwardRestrictParams>,
public TransformationTestsF {
public:
- static std::string get_test_name(const testing::TestParamInfo<TestSplitBackwardRestrictParams>& obj) {
+ static std::string get_test_name(const ::testing::TestParamInfo<TestSplitBackwardRestrictParams>& obj) {
PassFactoryPtr pass_factory;
size_t split_tree_depth;
size_t num_split_outputs;
@ -840,4 +835,6 @@ INSTANTIATE_TEST_SUITE_P(TransposeSinkingSplitBackwardRestrictTestSuite,
} // namespace backward
} // namespace transpose_sinking_split
}
}
}

View File

@ -14,6 +14,9 @@ using namespace std;
using namespace ov;
using namespace ov::opset10;
namespace transpose_sinking {
namespace testing {
shared_ptr<Node> create_main_node(const OutputVector& inputs, size_t num_ops, const FactoryPtr& creator) {
OutputVector current_inputs = inputs;
for (size_t i = 0; i < num_ops; ++i) {
@ -80,6 +83,5 @@ std::shared_ptr<ov::Node> parameter(ov::element::Type el_type, const PartialShap
return std::make_shared<Parameter>(el_type, ps);
}
shared_ptr<ov::Node> constant(ov::element::Type el_type, const Shape& shape, const vector<int64_t>& value) {
return make_shared<Constant>(el_type, shape, value);
}
}

View File

@ -11,9 +11,10 @@
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gtest/gtest.h"
using NodePtr = std::shared_ptr<ov::Node>;
namespace transpose_sinking {
namespace testing {
std::string to_string(const ov::Shape& shape);
using NodePtr = std::shared_ptr<ov::Node>;
class IFactory {
public:
@ -28,7 +29,6 @@ public:
private:
const std::string type_name_;
};
using FactoryPtr = std::shared_ptr<IFactory>;
class IPassFactory {
@ -44,8 +44,6 @@ private:
const std::string type_name_;
};
using PassFactoryPtr = std::shared_ptr<IPassFactory>;
template <typename PassT>
class PassFactory : public IPassFactory {
public:
@ -54,18 +52,22 @@ public:
pass_manager.register_pass<PassT>();
}
};
using PassFactoryPtr = std::shared_ptr<IPassFactory>;
#define CREATE_PASS_FACTORY(pass_name) std::make_shared<PassFactory<ov::pass::pass_name>>(#pass_name)
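// For instance, CREATE_PASS_FACTORY(TransposeSinkingGeneral) expands to
//     std::make_shared<PassFactory<ov::pass::TransposeSinkingGeneral>>("TransposeSinkingGeneral")
// i.e. a factory whose registerPass() adds ov::pass::TransposeSinkingGeneral to the given pass
// manager and whose getTypeName() reports the name used when building the test names.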
std::string to_string(const ov::Shape& shape);
ov::OutputVector set_transpose_for(const std::vector<size_t>& idxs, const ov::OutputVector& out_vec);
ov::OutputVector set_gather_for(const std::vector<size_t>& idxs, const ov::OutputVector& out_vec);
std::shared_ptr<ov::Node> create_main_node(const ov::OutputVector& inputs, size_t num_ops, const FactoryPtr& creator);
ov::ParameterVector filter_parameters(const ov::OutputVector& out_vec);
std::shared_ptr<ov::Node> create_main_node(const ov::OutputVector& inputs, size_t num_ops, const FactoryPtr& creator);
std::shared_ptr<ov::Node> parameter(ov::element::Type el_type, const ov::PartialShape& ps);
+ template<class T>
std::shared_ptr<ov::Node> constant(ov::element::Type el_type,
const ov::Shape& shape,
- const std::vector<int64_t>& value);
+ const std::vector<T>& value) {
+ return ov::opset10::Constant::create<T>(el_type, shape, value);
+ }
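// Usage sketch: constant<int64_t>(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2}) builds an i64
// Constant of shape {4}; the helper is a thin wrapper over ov::opset10::Constant::create<T>().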
}
}

View File

@ -14,8 +14,11 @@
using namespace ov;
using namespace ov::opset10;
using namespace transpose_sinking::testing;
using NodePtr = std::shared_ptr<ov::Node>;
namespace transpose_sinking {
namespace testing {
namespace unary {
using CreateGraphF = std::function<std::shared_ptr<
ov::Model>(FactoryPtr unary_factory, size_t num_unary_ops, const Shape& input_shape, element::Type input_type)>;
@ -58,10 +61,6 @@ public:
}
};
namespace transpose_sinking {
namespace testing {
namespace unary {
template <typename UnaryT>
class UnaryFactory : public IFactory {
public:
@ -96,8 +95,6 @@ FactoryPtr CreateUnaryFactory(const std::string& type_name) {
// ----------------------------------------------------------------------------
namespace {
std::shared_ptr<ov::Model> CreateFunctionTransposeBefore(const FactoryPtr& unary_factory,
size_t num_unary_ops,
const Shape& input_shape,
@ -366,8 +363,6 @@ std::vector<FactoryPtr> unary_factories = {
CREATE_UNARY_FACTORY(Sinh), CREATE_UNARY_FACTORY(SoftSign), CREATE_UNARY_FACTORY(Sqrt),
CREATE_UNARY_FACTORY(Tan), CREATE_UNARY_FACTORY(Tanh)};
} // namespace
TEST_P(TransposeSinkingUnaryTestFixture, CompareFunctions) {
FactoryPtr unary_factory;
PassFactoryPtr pass_factory;
@ -517,9 +512,6 @@ auto test_forward_multiple_consumers_first_node = []() {
test_case.type = element::f32;
return wrapper(test_case);
};
} // namespace unary
} // namespace testing
} // namespace transpose_sinking
INSTANTIATE_TEST_SUITE_P(TransposeSinkingUnaryForwardTestSuite,
TransposeSinkingUnaryTestFixture,
@ -564,4 +556,8 @@ INSTANTIATE_TEST_SUITE_P(TransposeSinkingUnaryBackwardMultTransposeConsumersTest
INSTANTIATE_TEST_SUITE_P(TransposeSinkingUnaryForwardMultTransposeConsumersTestSuiteFirstNode,
TransposeSinkingUnaryTestFixture,
transpose_sinking::testing::unary::test_forward_multiple_consumers_first_node(),
TransposeSinkingUnaryTestFixture::get_test_name);
} // namespace unary
} // namespace testing
} // namespace transpose_sinking

View File

@ -222,8 +222,7 @@ TEST_F(TransformationTestsF, InjectedBodyAndIf) {
}
}
// Ticket 101756
- TEST_F(TransformationTestsF, DISABLED_ModelWithDilatedGroupConvolution) {
+ TEST_F(TransformationTestsF, ModelWithDilatedGroupConvolution) {
{
model = convert_model("dilated_gconv_model/dilated_gconv_model.pb");
// need to call MOC to fuse BatchToSpace/SpaceToBatch with GroupConvolution

View File

@ -140,6 +140,7 @@ class UniqueNamesHolder {
size_t m_index{0};
bool m_soft_names_comparison{false};
bool m_result_friendly_names_check{true};
std::string generate_tensor_name() {
return "tensor_" + std::to_string(m_index++);
@ -226,21 +227,24 @@ public:
}
}
- // Check that result input node names are preserved
- bool is_multi_output = m_result_node_names.at(r.get()).second;
- const auto& ref_node_name = m_result_node_names.at(r.get()).first;
- const auto& cur_node_name = r->input_value(0).get_node()->get_friendly_name();
- if (is_multi_output || m_soft_names_comparison) {
-     if (cur_node_name.find(ref_node_name) == std::string::npos) {
-         std::stringstream ss;
-         ss << "Output node names mismatch: " << cur_node_name << " and " << ref_node_name << " (reference)";
-         throw ngraph_error(ss.str());
-     }
- } else if (cur_node_name != ref_node_name) {
-     std::stringstream ss;
-     ss << "Output node names are different: " << cur_node_name << " and " << ref_node_name
-        << " (reference)";
-     throw ngraph_error(ss.str());
- }
+ if (m_result_friendly_names_check) {
+     // Check that result input node names are preserved
+     bool is_multi_output = m_result_node_names.at(r.get()).second;
+     const auto &ref_node_name = m_result_node_names.at(r.get()).first;
+     const auto &cur_node_name = r->input_value(0).get_node()->get_friendly_name();
+     if (is_multi_output || m_soft_names_comparison) {
+         if (cur_node_name.find(ref_node_name) == std::string::npos) {
+             std::stringstream ss;
+             ss << "Output node names mismatch: " << cur_node_name << " and " << ref_node_name
+                << " (reference)";
+             throw ngraph_error(ss.str());
+         }
+     } else if (cur_node_name != ref_node_name) {
+         std::stringstream ss;
+         ss << "Output node names are different: " << cur_node_name << " and " << ref_node_name
+            << " (reference)";
+         throw ngraph_error(ss.str());
+     }
+ }
}
}
@ -248,6 +252,9 @@ public:
void enable_soft_names_comparison() {
m_soft_names_comparison = true;
}
void disable_result_friendly_names_check() {
m_result_friendly_names_check = false;
}
};
class InitUniqueNames : public ov::pass::ModelPass {
@ -265,9 +272,11 @@ class CheckUniqueNames : public ov::pass::ModelPass {
UniqueNamesHolder::Ptr m_unh;
public:
- CheckUniqueNames(UniqueNamesHolder::Ptr unh, bool soft_names_comparison = false) : m_unh(unh) {
+ CheckUniqueNames(UniqueNamesHolder::Ptr unh, bool soft_names_comparison = false, bool result_friendly_names_check = true) : m_unh(unh) {
if (soft_names_comparison)
m_unh->enable_soft_names_comparison();
if (!result_friendly_names_check)
m_unh->disable_result_friendly_names_check();
}
bool run_on_model(const std::shared_ptr<Function>& f) override {
m_unh->check_unique_names(f);

View File

@ -31,7 +31,7 @@ void TransformationTestsF::TearDown() {
function_ref = cloned_function;
}
- manager.register_pass<ngraph::pass::CheckUniqueNames>(m_unh, m_soft_names_comparison);
+ manager.register_pass<ngraph::pass::CheckUniqueNames>(m_unh, m_soft_names_comparison, m_result_friendly_names_check);
manager.run_passes(function);
if (!m_disable_rt_info_check) {
ASSERT_NO_THROW(check_rt_info(function));
@ -56,6 +56,10 @@ void TransformationTestsF::enable_soft_names_comparison() {
m_soft_names_comparison = true;
}
void TransformationTestsF::disable_result_friendly_names_check() {
m_result_friendly_names_check = false;
}
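// A test that intentionally changes the friendly name of a Result's input node can call
//     disable_result_friendly_names_check();
// inside the test body, before TearDown() runs, so that CheckUniqueNames skips the result-name
// comparison while all other checks stay enabled.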
void init_unique_names(std::shared_ptr<ngraph::Function> f, const std::shared_ptr<ngraph::pass::UniqueNamesHolder>& unh) {
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::InitUniqueNames>(unh);

View File

@ -39,6 +39,7 @@ public:
void disable_rt_info_check();
void enable_soft_names_comparison();
void disable_result_friendly_names_check();
std::shared_ptr<ov::Model> function, function_ref;
// Aliases to function and function_ref pointers to be more corresponding with ov namespace.
@ -50,6 +51,7 @@ private:
std::shared_ptr<ngraph::pass::UniqueNamesHolder> m_unh;
bool m_disable_rt_info_check{false};
bool m_soft_names_comparison{true};
bool m_result_friendly_names_check{true};
};
void init_unique_names(std::shared_ptr<ngraph::Function> f, const std::shared_ptr<ngraph::pass::UniqueNamesHolder>& unh);