codestyle

Tikhonov Ivan 2023-02-12 17:48:05 +00:00
parent 2c48d6c583
commit 1de806f9f7
8 changed files with 29 additions and 69 deletions

View File

@@ -1011,7 +1011,7 @@ static UNUSED void printPerformanceCounts(std::vector<ov::ProfilingInfo> perform
     }
     std::ios::fmtflags fmt(std::cout.flags());
     stream << std::fixed << std::setprecision(precision);
-    std::map<std::string, std::pair<int, double>> layer_to_time;
     for (const auto& it : performanceData) {
         if (it.real_time.count() > 0) {
             totalTime += it.real_time;
@@ -1042,8 +1042,6 @@ static UNUSED void printPerformanceCounts(std::vector<ov::ProfilingInfo> perform
         }
         stream << "layerType: ";
-        layer_to_time[it.node_type].second += it.cpu_time.count() / 1000.0;
-        layer_to_time[it.node_type].first += 1;
         if (it.node_type.length() >= maxPrintLength) {
             stream << std::setw(maxPrintLength) << std::left << it.node_type.substr(0, maxPrintLength - 3) + "..."
                    << " ";
@@ -1058,10 +1056,6 @@ static UNUSED void printPerformanceCounts(std::vector<ov::ProfilingInfo> perform
                << it.cpu_time.count() / 1000.0 << " ";
         stream << std::endl;
     }
-    stream << std::setw(25) << "Total time per layer" << std::endl;
-    for (const auto& it : layer_to_time) {
-        stream << it.first << " cnt: " << it.second.first << " time: " << it.second.second << std::endl;
-    }
     stream << std::setw(25) << std::left << "Total time: " << std::fixed << std::setprecision(3)
            << totalTime.count() / 1000.0 << " milliseconds" << std::endl;
     stream << std::setw(25) << std::left << "Total CPU time: " << std::fixed << std::setprecision(3)
@@ -1185,7 +1179,6 @@ static UNUSED void printPerformanceCountsSort(std::vector<ov::ProfilingInfo> per
         std::sort(sortPerfCounts.begin(), sortPerfCounts.end(), sort_pc_descend);
     }
-    std::map<std::string, double> layer_to_time;
     for (const auto& it : sortPerfCounts) {
         if ((sorttype == pcSimpleSort && it.status == ov::ProfilingInfo::Status::EXECUTED) ||
             sorttype != pcSimpleSort) {
@@ -1211,7 +1204,6 @@ static UNUSED void printPerformanceCountsSort(std::vector<ov::ProfilingInfo> per
             }
             stream << "layerType: ";
-            layer_to_time[it.node_type] += it.cpu_time.count() / 1000.0;
             if (it.node_type.length() >= maxPrintLength) {
                 stream << std::setw(maxPrintLength) << std::left
                        << it.node_type.substr(0, maxPrintLength - 3) + "..."
@@ -1237,10 +1229,6 @@ static UNUSED void printPerformanceCountsSort(std::vector<ov::ProfilingInfo> per
                 stream << std::endl;
             }
         }
-        stream << std::setw(25) << "Total time per layer" << std::endl;
-        for (const auto& it : layer_to_time) {
-            stream << it.first << " " << it.second << std::endl;
-        }
     }
     stream << std::setw(25) << std::left << "Total time: " + std::to_string(totalTime.count() / 1000.0)
            << " milliseconds" << std::endl;

View File

@@ -10,11 +10,11 @@
 #include "openvino/op/util/op_types.hpp"
 #include "openvino/opsets/opset9.hpp"
+#include "openvino/pass/graph_rewrite.hpp"
 #include "openvino/pass/pattern/op/label.hpp"
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 #include "openvino/util/common_util.hpp"
 #include "openvino/util/log.hpp"
-#include "openvino/pass/graph_rewrite.hpp"
 namespace transpose_sinking {
@@ -66,7 +66,7 @@ namespace sink_forward {
  * @brief Inserts reversed transposes on @arg main_node inputs. Removes the input transpose specified in @arg
  * transpose_input_info
  */
-bool UpdateInputTransposes(const std::shared_ptr<ov::Node>& main_node, const TransposeInputsInfo& transpose_input_info, std::vector<std::shared_ptr<ov::Node>>& new_nodes);
+bool UpdateInputTransposes(const std::shared_ptr<ov::Node>& main_node, const TransposeInputsInfo& transpose_input_info);
 /**
  * @brief Removes @arg input node
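
Note on the signature change above: the new_nodes out-parameter is gone, so UpdateInputTransposes no longer reports the transposes it creates (the .cpp hunks below drop the matching register_new_node loop). The core of the operation is permutation inversion: to cancel a Transpose with order P on one input, transposes with the inverse order are inserted on the others. A standalone sketch of that math in plain C++; reverse_transpose_order is a hypothetical stand-in for ReverseTransposeOrder from transpose_sinking_utils.cpp.

#include <cstddef>
#include <iostream>
#include <vector>

// Inverse permutation: inv[order[i]] = i.
std::vector<size_t> reverse_transpose_order(const std::vector<size_t>& order) {
    std::vector<size_t> inv(order.size());
    for (size_t i = 0; i < order.size(); ++i)
        inv[order[i]] = i;
    return inv;
}

int main() {
    const std::vector<size_t> order = {0, 2, 3, 1};  // e.g. NCHW -> NHWC
    for (size_t axis : reverse_transpose_order(order))
        std::cout << axis << ' ';                    // prints: 0 3 1 2
    std::cout << '\n';
}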

View File

@@ -134,21 +134,19 @@ ov::pass::TransposeReductionBackward::TransposeReductionBackward() {
     MATCHER_SCOPE(TransposeReductionBackward);
     auto reduce_or_squeeze_label =
-            pattern::wrap_type<op::util::ArithmeticReductionKeepDims, op::util::LogicalReductionKeepDims, opset6::Squeeze>(
-                    {pattern::any_input(), pattern::wrap_type<opset6::Constant>()});
+        pattern::wrap_type<op::util::ArithmeticReductionKeepDims, op::util::LogicalReductionKeepDims, opset6::Squeeze>(
+            {pattern::any_input(), pattern::wrap_type<opset6::Constant>()});
     auto transpose_label =
-            pattern::wrap_type<opset6::Transpose>({reduce_or_squeeze_label, pattern::wrap_type<opset6::Constant>()});
+        pattern::wrap_type<opset6::Transpose>({reduce_or_squeeze_label, pattern::wrap_type<opset6::Constant>()});
     ov::matcher_pass_callback matcher_pass_callback = [=](ngraph::pattern::Matcher& m) {
         const auto& pattern_to_output = m.get_pattern_value_map();
-        std::cout << "XXXXX here -1" << std::endl;
         auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr();
         auto reduction = pattern_to_output.at(reduce_or_squeeze_label).get_node_shared_ptr();
         auto arithmetic_reduce = std::dynamic_pointer_cast<op::util::ArithmeticReductionKeepDims>(reduction);
         auto logical_reduce = std::dynamic_pointer_cast<op::util::LogicalReductionKeepDims>(reduction);
         auto squeeze = std::dynamic_pointer_cast<opset6::Squeeze>(reduction);
-        std::cout << "XXXXX here 000" << std::endl;
         if (!transpose || !(arithmetic_reduce || logical_reduce || squeeze))
             return false;
@@ -157,12 +155,10 @@ ov::pass::TransposeReductionBackward::TransposeReductionBackward() {
             keep_dims = logical_reduce->get_keep_dims();
         else if (arithmetic_reduce)
             keep_dims = arithmetic_reduce->get_keep_dims();
-        std::cout << "XXXXX here 0" << std::endl;
         auto transpose_order = std::dynamic_pointer_cast<opset6::Constant>(transpose->get_input_node_shared_ptr(1));
         auto reduction_axes = std::dynamic_pointer_cast<opset6::Constant>(reduction->get_input_node_shared_ptr(1));
         if (!transpose_order || !reduction_axes)
             return false;
-        std::cout << "XXXXX here 01" << std::endl;
         const auto& non_negative_axes = normalize_axes(reduction->get_friendly_name(),
                                                        reduction_axes->cast_vector<int64_t>(),
                                                        reduction->get_input_partial_shape(0).rank());
@@ -174,15 +170,12 @@ ov::pass::TransposeReductionBackward::TransposeReductionBackward() {
         for (const auto& axis : non_negative_axes) {
             new_values.push_back(reversed_order_values[axis]);
         }
-        std::cout << "XXXXX here 1" << std::endl;
-        auto new_const = std::make_shared<opset6::Constant>(reduction_axes->get_element_type(), reduction_axes->get_shape(), new_values);
-        std::cout << "XXXXX here 2" << std::endl;
+        auto new_const = std::make_shared<opset6::Constant>(reduction_axes->get_element_type(),
+                                                            reduction_axes->get_shape(),
+                                                            new_values);
         transpose->input(0).replace_source_output(reduction->input_value(0));
-        std::cout << "XXXXX here 3" << std::endl;
         reduction->input(1).replace_source_output(new_const);
-        std::cout << "XXXXX here 4" << std::endl;
         reduction->input(0).replace_source_output(transpose);
-        std::cout << "XXXXX here 5" << std::endl;
         return true;
     };
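
Note on the hunk above: besides dropping the debug output, it keeps the axis bookkeeping this pass depends on. When the Transpose is moved to the other side of the reduction, each reduction axis is remapped through the reversed transpose order (new_values.push_back(reversed_order_values[axis])). A standalone sketch of that step, with illustrative values:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const std::vector<int64_t> reversed_order_values = {0, 3, 1, 2};  // inverse of transpose order {0, 2, 3, 1}
    const std::vector<int64_t> non_negative_axes = {1, 2};            // axes the reduction originally used

    // Remap each reduction axis through the reversed transpose order.
    std::vector<int64_t> new_values;
    for (const auto& axis : non_negative_axes)
        new_values.push_back(reversed_order_values[axis]);

    for (int64_t a : new_values)
        std::cout << a << ' ';  // prints: 3 1
    std::cout << '\n';
}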
@@ -325,12 +318,13 @@ ov::pass::TransposeFQReduction::TransposeFQReduction() {
 ov::pass::TransposeFuse::TransposeFuse() {
     MATCHER_SCOPE(TransposeFuse);
-    auto transpose_label = pattern::wrap_type<opset7::Transpose>({pattern::any_input(), pattern::wrap_type<opset7::Constant>()});
+    auto transpose_label =
+        pattern::wrap_type<opset7::Transpose>({pattern::any_input(), pattern::wrap_type<opset7::Constant>()});
     ov::matcher_pass_callback matcher_pass_callback = [=](ngraph::pattern::Matcher& m) {
         const auto& pattern_to_output = m.get_pattern_map();
         auto transpose_1 = pattern_to_output.at(transpose_label);
-        std::cout << "XXXXXXXXXXXXXXX TransposeFuse " << transpose_1->get_friendly_name() << std::endl;
-        auto order_const_1 = std::dynamic_pointer_cast<opset7::Constant>(transpose_1->input_value(1).get_node_shared_ptr());
+        auto order_const_1 =
+            std::dynamic_pointer_cast<opset7::Constant>(transpose_1->input_value(1).get_node_shared_ptr());
         auto consumers = transpose_1->get_output_target_inputs(0);
         std::vector<int64_t> saved_order_values;
@@ -338,20 +332,17 @@ ov::pass::TransposeFuse::TransposeFuse() {
         for (const auto& it : consumers) {
             auto out_transpose = dynamic_cast<opset7::Transpose*>(it.get_node());
             if (!out_transpose) {
-                std::cout << "XXXXXXX RET 1" << std::endl;
                 return false;
             }
             auto order = out_transpose->input_value(1).get_node_shared_ptr();
             auto order_const = std::dynamic_pointer_cast<opset7::Constant>(order);
             if (!order_const) {
-                std::cout << "XXXXXXX RET 2" << std::endl;
                 return false;
             }
             auto order_values = order_const->cast_vector<int64_t>();
             if (order_values.empty()) {
-                std::cout << "XXXXXXX RET 3" << std::endl;
                 return false;
             }
@@ -359,7 +350,6 @@ ov::pass::TransposeFuse::TransposeFuse() {
                 saved_order_values = order_values;
             } else {
                 if (saved_order_values != order_values) {
-                    std::cout << "XXXXXXX RET 4" << std::endl;
                     return false;
                 }
             }
@@ -371,7 +361,6 @@ ov::pass::TransposeFuse::TransposeFuse() {
         auto order1 = order_const_1->cast_vector<int64_t>();
         if (order1.size() != saved_order_values.size()) {
-            std::cout << "XXXXXXX RET 5" << std::endl;
             return false;
         }
@@ -383,12 +372,11 @@ ov::pass::TransposeFuse::TransposeFuse() {
         }
         if (is_ordered) {
-            std::cout << "XXXXXX1" << std::endl;
-            ;
             for (const auto& it : consumers) {
                 it.get_node()->output(0).replace(transpose_1->input_value(0));
             }
         } else {
-            std::cout << "XXXXXX2" << std::endl;
             auto new_order = opset7::Constant::create(saved_type, {saved_order_values.size()}, saved_order_values);
             auto new_transpose = register_new_node<opset7::Transpose>(transpose_1->input_value(0), new_order);
             for (const auto& it : consumers) {

View File

@@ -32,12 +32,10 @@ ov::pass::TransposeSinkingBinaryForward::TransposeSinkingBinaryForward() {
         auto main_node = main_node_output.get_node_shared_ptr();
         TransposeInputsInfo transpose_input_info = GetFirstTransposeInput(main_node);
-        std::cout << "XXXXXXXXXX Forward " << main_node->get_friendly_name() << std::endl;
         // todo: support dynamic rank case
-        std::vector<std::shared_ptr<ov::Node>> new_nodes;
-        bool updated = sink_forward::UpdateInputTransposes(main_node, transpose_input_info, new_nodes);
+        bool updated = sink_forward::UpdateInputTransposes(main_node, transpose_input_info);
         if (!updated) {
-            std::cout << "XXXXXXXXXX RETURN 5" << std::endl;
             return false;
         }
         for (auto& new_node : sink_forward::InsertOutputTransposes(main_node, transpose_input_info)) {
@@ -72,7 +70,7 @@ ov::pass::TransposeSinkingBinaryBackward::TransposeSinkingBinaryBackward() {
         auto transpose_const = as_type_ptr<Constant>(pattern_to_output.at(transpose_const_label).get_node_shared_ptr());
         auto transpose = pattern_to_output.at(transpose_label).get_node_shared_ptr();
         auto main_node = pattern_to_output.at(main_node_label).get_node_shared_ptr();
-        std::cout << "XXXXXXXXXX Backward " << main_node->get_friendly_name() << std::endl;
         for (auto& new_node : sink_backward::InsertTransposeBeforeNode(main_node, transpose_const)) {
             register_new_node(new_node);
         }

View File

@@ -38,16 +38,11 @@ ov::pass::TransposeSinkingConcatForward::TransposeSinkingConcatForward() {
             return false;
         }
         // todo: support dyn rank case
-        std::vector<std::shared_ptr<ov::Node>> new_nodes;
-        bool updated = sink_forward::UpdateInputTransposes(main_node, transpose_input_info, new_nodes);
+        bool updated = sink_forward::UpdateInputTransposes(main_node, transpose_input_info);
         if (!updated) {
             return false;
         }
-        for (const auto& it : new_nodes) {
-            std::cout << "XXXXXX reg new node " << it->get_friendly_name() << std::endl;
-            register_new_node(it);
-        }
         for (auto& new_node : sink_forward::InsertOutputTransposes(main_node, transpose_input_info)) {
             register_new_node(new_node);
             transpose_sinking::UpdateForwardSinkingAbility(new_node);
@@ -71,21 +66,19 @@ ov::pass::TransposeSinkingConcatBackward::TransposeSinkingConcatBackward() {
         return has_static_rank()(output) && HasSameOutputTransposeNodes(output);
     });
-    /* auto transpose_const_label = wrap_type<Constant>();
-    auto transpose_label =
-        wrap_type<Transpose>({main_node_label, transpose_const_label}, [](const Output<Node>& output) -> bool {
-            return has_static_rank()(output);
-        });*/
+    /* auto transpose_const_label = wrap_type<Constant>();
+    auto transpose_label =
+        wrap_type<Transpose>({main_node_label, transpose_const_label}, [](const Output<Node>& output) -> bool {
+            return has_static_rank()(output);
+        });*/
     matcher_pass_callback matcher_pass_callback = [=](Matcher& m) {
         const auto& pattern_to_output = m.get_pattern_value_map();
         auto main_node = pattern_to_output.at(main_node_label).get_node_shared_ptr();
         auto transpose = pattern_to_output.at(main_node->input_value(0).get_node_shared_ptr()).get_node_shared_ptr();
         auto transpose_const = as_type_ptr<Constant>(transpose->input_value(1).get_node_shared_ptr());
-        std::cout << "XXXXXXXXXX: CONCAT backward " << main_node->get_friendly_name() << std::endl;
         auto concat_node = as_type_ptr<Concat>(main_node);
         auto concat_axis = concat_node->get_concatenation_axis();
         if (concat_axis < 0) {

View File

@@ -45,12 +45,8 @@ bool ov::pass::TransposeSinkingGeneral::run_on_model(const std::shared_ptr<ov::M
     RUN_ON_FUNCTION_SCOPE(TransposeSinkingGeneral);
     {
         ngraph::pass::Manager manager(get_pass_config());
-        manager.register_pass<ov::pass::Serialize>("/home/tikhonov/OpenVINO/tmp/serialized/ts_before_forward.xml",
-                                                   "/home/tikhonov/OpenVINO/tmp/serialized/ts_before_forward.bin");
         manager.register_pass<ov::pass::TransposeSinkingGeneralForward>();
         manager.register_pass<ngraph::pass::ConstantFolding>();
-        manager.register_pass<ov::pass::Serialize>("/home/tikhonov/OpenVINO/tmp/serialized/ts_after_forward.xml",
-                                                   "/home/tikhonov/OpenVINO/tmp/serialized/ts_after_forward.bin");
         manager.run_passes(f);
     }
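
Note on the lines removed above: they dumped intermediate IR to hardcoded developer paths. For reference, a sketch of the same debugging technique with neutral placeholder paths (before.xml/after.xml and debug_dump_around are illustrative, not part of the codebase); ov::pass::Serialize writes the model as IR (.xml plus .bin) at whatever point in the pipeline it is registered.

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/pass/serialize.hpp"

void debug_dump_around(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    manager.register_pass<ov::pass::Serialize>("before.xml", "before.bin");
    // ... register the pass under investigation between the two dumps ...
    manager.register_pass<ov::pass::Serialize>("after.xml", "after.bin");
    manager.run_passes(model);
}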

View File

@@ -155,17 +155,13 @@ AxisVector AlignTransposeOrder(const Output<Node>& output, const TransposeInputs
     return new_transpose_order;
 }
-bool UpdateInputTransposes(const NodePtr& main_node, const TransposeInputsInfo& transpose_input_info, std::vector<std::shared_ptr<ov::Node>>& new_nodes) {
-    if (transpose_input_info.isEmpty() || HasDynamicRankInput(main_node)) {
-        std::cout << "XXXXXXXXXXXXXXxx RETURN 1" << std::endl;
+bool UpdateInputTransposes(const NodePtr& main_node, const TransposeInputsInfo& transpose_input_info) {
+    if (transpose_input_info.isEmpty() || HasDynamicRankInput(main_node))
         return false;
-    }
     const auto max_input_rank = GetMaxInputRank(main_node);
-    if (max_input_rank < 0) {
-        std::cout << "XXXXXXXXXXXXXXxx RETURN 2" << std::endl;
+    if (max_input_rank < 0)
         return false;
-    }
     const size_t transpose_input_index = transpose_input_info.input_idx;
     const auto transpose_element_type = transpose_input_info.transpose_const->get_element_type();
@@ -179,7 +175,6 @@ bool UpdateInputTransposes(const NodePtr& main_node, const TransposeInputsInfo&
         input_node = FixInputNodeRank(input_node, max_input_rank);
         auto transpose_order = AlignTransposeOrder(input_node, transpose_input_info);
         if (transpose_order.empty()) {
-            std::cout << "XXXXXXXXXXXXXXxx RETURN 3" << std::endl;
             return false;
         }
         const auto reversed_transpose_axis_order = ReverseTransposeOrder(transpose_order);
@@ -187,7 +182,7 @@ bool UpdateInputTransposes(const NodePtr& main_node, const TransposeInputsInfo&
                                                     Shape{reversed_transpose_axis_order.size()},
                                                     reversed_transpose_axis_order);
         auto new_transpose = std::make_shared<Transpose>(input_node, new_transpose_const);
-        new_nodes.push_back(new_transpose);
         main_node->input(i).replace_source_output(new_transpose->output(0));
         copy_runtime_info(input_node.get_node_shared_ptr(), {new_transpose, new_transpose_const});

View File

@@ -18,6 +18,7 @@
 using namespace testing;
+/*
 TEST(TransformationTests, ConcatTest) {
     auto data1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 28, 28, 1, 58});
     auto data2 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 28, 28, 1, 58});
@@ -29,6 +30,7 @@ TEST(TransformationTests, ConcatTest) {
     auto model = std::make_shared<ov::Model>(ov::OutputVector{X}, ov::ParameterVector{data1, data2});
     serialize(model, "/home/tikhonov/OpenVINO/tmp/concat_5d_axis" + std::to_string(axis) +".xml");
 }
+*/
 TEST(TransformationTests, DivideFusion) {
     std::shared_ptr<ngraph::Function> f, f_ref;