Fix static analysis issues in pruning mask propagation (#17910)
Ticket: CVS-108960
parent 93689cc417
commit 8653f1cbd9
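All hunks below apply the same family of range-for fixes reported by static analysis: loop variables that are only read become `const auto` (cheap integral dims and channel indices) or `const auto&` (shared pointers and graph handles), while the one loop that mutates through its elements keeps a plain `auto&`. A minimal sketch of the three idioms, separate from the real OpenVINO types (`Handle` below is a made-up stand-in for lightweight handles such as ov::Input<Node>):

#include <cstdint>
#include <memory>
#include <set>
#include <vector>

struct Handle {
    int value = 0;
    void mutate() { ++value; }  // stands in for replace_source_output()
};

int main() {
    std::set<std::uint64_t> dims{0, 2, 3};
    std::vector<std::shared_ptr<int>> nodes{std::make_shared<int>(1)};
    std::vector<Handle> consumers(3);

    // Read-only, trivially copyable elements: take by value.
    for (const auto dim : dims) {
        (void)dim;
    }

    // Read-only, expensive-to-copy elements: const reference avoids a
    // copy (here, a shared_ptr refcount bump) on every iteration.
    for (const auto& node : nodes) {
        (void)node;
    }

    // Elements mutated in the loop body: non-const reference.
    for (auto& consumer : consumers) {
        consumer.mutate();
    }
}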
@@ -441,7 +441,7 @@ public:
         const auto concat = std::make_shared<opset10::Concat>(
             NodeVector{opset10::Constant::create(m_shape.get_element_type(), {2}, {-1, 1}), gather},
             0);
-        for (auto consumer : m_shape_consumers)
+        for (auto& consumer : m_shape_consumers)
             consumer.replace_source_output(concat);
 
         // This transformation propagates only Reshape mask and doesn't do anything with GroupConvolution.
@@ -527,13 +527,13 @@ public:
                        std::inserter(weights_shape_mask, weights_shape_mask.begin()),
                        ge_zero_pred);
 
-        for (auto& elem : weights_shape_broadcasted_dims) {
+        for (const auto elem : weights_shape_broadcasted_dims) {
             const auto shifted_elem = elem + input_shape_size_diff;
             if (shifted_elem >= 0)
                 input_shape_mask.insert(shifted_elem);
         }
 
-        for (auto& elem : input_shape_broadcasted_dims) {
+        for (const auto elem : input_shape_broadcasted_dims) {
             const auto shifted_elem = elem + weights_shape_size_diff;
             if (shifted_elem >= 0)
                 weights_shape_mask.insert(shifted_elem);
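A note on the `auto& elem` → `const auto elem` changes in this hunk: the loop bodies only read `elem`, and if the broadcasted-dims containers are std::sets like the neighboring mask containers, `auto&` already deduced a const reference, since set elements are immutable through their iterators. Copying a 64-bit integer is free, so the by-value form says the same thing more directly. A sketch, assuming a signed 64-bit element type (suggested by the `shifted_elem >= 0` check):

#include <cstdint>
#include <set>
#include <type_traits>

int main() {
    std::set<std::int64_t> broadcasted_dims{-1, 2, 3};

    // `auto&` over a std::set deduces `const std::int64_t&`;
    // a mutable reference would not compile here.
    for (auto& elem : broadcasted_dims) {
        static_assert(std::is_same_v<decltype(elem), const std::int64_t&>);
        (void)elem;
    }

    // `const auto` copies the value instead: equally cheap for a
    // 64-bit integer, and explicitly read-only.
    for (const auto elem : broadcasted_dims) {
        static_assert(std::is_same_v<decltype(elem), const std::int64_t>);
        (void)elem;
    }
}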
@@ -611,7 +611,7 @@ public:
         weights_mask->add_callback(
             [input_mask_row, weights_shape_mask](Mask::Ptr cur_mask) -> bool {
                 cur_mask->copy_value_from_mask_reversed_masked(input_mask_row, weights_shape_mask);
-                for (auto& dim : weights_shape_mask)
+                for (const auto dim : weights_shape_mask)
                     cur_mask->at(dim).clear();
                 return true;
             },
@@ -714,7 +714,7 @@ public:
             return false;
         size_t idx = 0;
         if (fq_node->get_auto_broadcast() != ngraph::op::AutoBroadcastType::NONE) {
-            for (auto node : fq_params_nodes) {
+            for (const auto& node : fq_params_nodes) {
                 auto const_node = std::dynamic_pointer_cast<op::Constant>(node);
                 if (!const_node)
                     OPENVINO_THROW("Unexpected operation type.");
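This hunk and the next fix the costlier variant of the warning: `fq_params_nodes` holds shared pointers (the `std::dynamic_pointer_cast` in the body confirms it), so iterating by value copied the pointer and paid an atomic use-count increment and decrement per iteration. `const auto&` binds to the stored pointer with no refcount traffic. A minimal illustration:

#include <memory>
#include <vector>

int main() {
    std::vector<std::shared_ptr<int>> fq_params_nodes{std::make_shared<int>(0),
                                                      std::make_shared<int>(1)};

    // By value: each iteration copies the shared_ptr (atomic use-count
    // increment on entry, decrement when the iteration ends).
    for (auto node : fq_params_nodes) {
        (void)node;
    }

    // By const reference: no copy, no refcount traffic.
    for (const auto& node : fq_params_nodes) {
        (void)node;
    }
}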
@@ -734,7 +734,7 @@ public:
             return true;
         };
 
-        for (auto fq_param : fq_params_nodes) {
+        for (const auto& fq_param : fq_params_nodes) {
             auto mask = std::make_shared<Mask>(fq_param->get_shape().size());
             mask->add_callback(fq_params_mask_callback, input_mask);
             input_mask->add_callback(
@@ -805,7 +805,7 @@ public:
 
                 for (size_t i = 0; i < input_sizes.size(); ++i) {
                     if (input_masks_row.count(i)) {
-                        for (auto idx : input_masks_row.at(i)->at(axis)) {
+                        for (const auto idx : input_masks_row.at(i)->at(axis)) {
                             cur_mask->at(axis).insert(idx + cur_size);
                         }
                     }
@@ -822,7 +822,7 @@ public:
                     min_val += input_sizes[i];
                 }
                 uint64_t max_val = min_val + input_sizes[input_idx];
-                for (auto idx : output_mask_row->at(axis)) {
+                for (const auto idx : output_mask_row->at(axis)) {
                     if (idx < max_val && idx >= min_val) {
                         cur_mask->at(axis).insert(idx - min_val);
                     }
@@ -1036,7 +1036,7 @@ static ChannelsMap map_channels(const std::set<uint64_t> squized_mask_dim,
     auto squized_mask_res = std::set<uint64_t>();
     auto unsquized_mask = std::map<uint64_t, std::set<uint64_t>>();
     auto suspicious_elems = std::set<uint64_t>();
-    for (auto& unsquized_dim : unsquized_dims) {
+    for (const auto unsquized_dim : unsquized_dims) {
         unsquized_mask[unsquized_dim] = std::set<uint64_t>();
         auto squized_mask_dim_copy = std::set<uint64_t>();
         const auto unsquized_shift = unsquized_dim - unsquized_dims[0];
@@ -1129,7 +1129,7 @@ public:
 
         // Check if this reshape is before group convolution
         // In such case this reshape should be processed by GroupConvolutionReshape pass
-        for (const auto inp : m_output.get_target_inputs())
+        for (const auto& inp : m_output.get_target_inputs())
             if (is_type<opset10::GroupConvolution>(inp.get_node()))
                 return true;
 
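Here the variable was already `const`, but `const auto inp` still copied each element of `m_output.get_target_inputs()`. An `Input<Node>` is a small handle (roughly a node pointer plus a port index), so the copy was cheap rather than wrong; `const auto&` simply removes it and is never worse. A sketch with a hypothetical stand-in type (`InputHandle` is not the real OpenVINO class):

#include <cstddef>
#include <set>

// Hypothetical stand-in for ov::Input<Node>: a small value-type handle.
struct InputHandle {
    std::size_t node_id = 0;  // stands in for the node pointer
    std::size_t index = 0;
    bool operator<(const InputHandle& rhs) const {
        return node_id != rhs.node_id ? node_id < rhs.node_id : index < rhs.index;
    }
};

int main() {
    std::set<InputHandle> target_inputs{{0, 0}, {0, 1}};

    // `const auto inp` would copy the handle on each iteration;
    // `const auto& inp` binds directly to the set element.
    for (const auto& inp : target_inputs) {
        (void)inp;
    }
}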
@@ -1245,9 +1245,9 @@ public:
             [=](Mask::Ptr cur_mask) -> bool {
                 for (size_t in_dim = 0; in_dim < dims_map.size(); ++in_dim) {
                     cur_mask->at(in_dim).clear();
-                    for (auto& out_dim : dims_map[in_dim]) {
+                    for (const auto out_dim : dims_map[in_dim]) {
                         const auto unsquized_shift = out_dim - dims_map[in_dim][0];
-                        for (auto& ch : weights_mask_row->at(out_dim)) {
+                        for (const auto ch : weights_mask_row->at(out_dim)) {
                             NGRAPH_SUPPRESS_DEPRECATED_START
                             auto iter = get_channel_iter(dims_shape[in_dim], unsquized_shift, ch);
                             for (const auto& coord : iter)
@@ -1267,7 +1267,7 @@ public:
                                                  dims_map[in_dim],
                                                  dims_attrs,
                                                  dims_shape[in_dim]);
-                    for (auto& dim : map.unsquized_mask)
+                    for (const auto& dim : map.unsquized_mask)
                         cur_mask->at(dim.first) = dim.second;
                     if (map.should_init)
                         cur_mask->initialize_dependencies();
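In this hunk and its mirror below (`@@ -1305`), the element type is heavier: `map.unsquized_mask` is a `std::map<uint64_t, std::set<uint64_t>>` (its construction is visible in the map_channels hunk above), so each element is a `std::pair<const uint64_t, std::set<uint64_t>>`. The body only reads `dim.first` and `dim.second`, so the reference merely gains `const`; iterating by value would have copied the whole set inside every element. A sketch of that shape:

#include <cstdint>
#include <map>
#include <set>

int main() {
    std::map<std::uint64_t, std::set<std::uint64_t>> unsquized_mask{
        {0, {1, 2}},
        {3, {4}},
    };

    // Read-only iteration over pair<const key, set>: `const auto&`
    // avoids copying the std::set held in each element.
    for (const auto& dim : unsquized_mask) {
        (void)dim.first;   // unsquized mask dimension
        (void)dim.second;  // channel set for that dimension
    }
}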
@@ -1305,7 +1305,7 @@ public:
                                                  dims_map[out_dim],
                                                  dims_attrs,
                                                  dims_shape[out_dim]);
-                    for (auto& dim : map.unsquized_mask)
+                    for (const auto& dim : map.unsquized_mask)
                         cur_mask->at(dim.first) = dim.second;
                     if (map.should_init)
                         cur_mask->initialize_dependencies();
@@ -1318,9 +1318,9 @@ public:
             [=](Mask::Ptr cur_mask) -> bool {
                 for (size_t out_dim = 0; out_dim < dims_map.size(); ++out_dim) {
                     cur_mask->at(out_dim).clear();
-                    for (auto& in_dim : dims_map[out_dim]) {
+                    for (const auto in_dim : dims_map[out_dim]) {
                         const auto unsquized_shift = in_dim - dims_map[out_dim][0];
-                        for (auto& ch : input_mask_row->at(in_dim)) {
+                        for (const auto ch : input_mask_row->at(in_dim)) {
                             NGRAPH_SUPPRESS_DEPRECATED_START
                             auto iter = get_channel_iter(dims_shape[out_dim], unsquized_shift, ch);
                             for (const auto& coord : iter)
@@ -1416,7 +1416,7 @@ public:
         output_mask->add_callback(
             [input_mask_row, forward_order](Mask::Ptr cur_mask) -> bool {
                 cur_mask->clear();
-                for (auto& dim : forward_order)
+                for (const auto dim : forward_order)
                     cur_mask->push_back(input_mask_row->at(dim));
                 return true;
             },
@@ -1424,7 +1424,7 @@ public:
         input_mask->add_callback(
             [output_mask_row, backward_order](Mask::Ptr cur_mask) -> bool {
                 cur_mask->clear();
-                for (auto& dim : backward_order)
+                for (const auto dim : backward_order)
                     cur_mask->push_back(output_mask_row->at(dim));
                 return true;
             },
@@ -1464,7 +1464,7 @@ static ngraph::Mask::Ptr create_connect_split_output_mask(ngraph::Mask::Ptr inpu
     for (size_t j = 0; j < output_mask_raw->size(); j++) {
         const auto& dim_mask = output_mask_raw->at(j);
         if (static_cast<int64_t>(j) == axis) {
-            for (auto d : dim_mask)
+            for (const auto d : dim_mask)
                 cur_mask->at(j).insert(d + split_start);
         } else {
             cur_mask->at(j) = dim_mask;
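Taken together, these rewrites match the usual remedies for range-for copy diagnostics such as clang-tidy's performance-for-range-copy check; the commit does not name the analyzer, so that attribution is a guess. None of the hunks is intended to change behavior: every loop either already mutated through a handle or only reads its elements.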