Fixed coverity issues for core components (#9558)

* Fixed coverity issues for core components

* Fixed some tests
This commit is contained in:
Ilya Churaev 2022-01-11 17:28:50 +03:00 committed by GitHub
parent a49f1b3bc6
commit 02cabcda3c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 34 additions and 31 deletions

View File

@ -48,7 +48,7 @@ private:
static constexpr size_t INPUT_MEAN = 3;
static constexpr size_t INPUT_VARIANCE = 4;
double m_epsilon;
double m_epsilon{0};
};
} // namespace v0
namespace v5 {
@ -89,7 +89,7 @@ private:
static constexpr size_t INPUT_MEAN = 3;
static constexpr size_t INPUT_VARIANCE = 4;
double m_epsilon;
double m_epsilon{0};
};
} // namespace v5
} // namespace op

View File

@ -51,7 +51,7 @@ public:
private:
element::Type m_output_type;
bool m_with_right_bound;
bool m_with_right_bound{true};
};
} // namespace v3
} // namespace op

View File

@ -709,7 +709,7 @@ private:
element::Type m_element_type;
Shape m_shape{};
std::shared_ptr<ngraph::runtime::AlignedBuffer> m_data;
bool m_all_elements_bitwise_identical;
bool m_all_elements_bitwise_identical = false;
bool m_alloc_buffer_on_visit_attributes = true;
};
} // namespace v0

View File

@ -31,7 +31,7 @@ public:
}
private:
bool m_ctc_merge_repeated;
bool m_ctc_merge_repeated{true};
};
} // namespace v0
} // namespace op

View File

@ -92,7 +92,7 @@ public:
}
private:
bool m_merge_repeated;
bool m_merge_repeated{true};
element::Type m_classes_index_type{element::i32};
element::Type m_sequence_length_type{element::i32};
};

View File

@ -166,7 +166,7 @@ public:
}
private:
bool m_bilinear_interpolation_pad;
bool m_bilinear_interpolation_pad{false};
};
} // namespace v8
} // namespace op

View File

@ -94,8 +94,8 @@ public:
}
private:
int64_t m_output_dim;
float m_spatial_scale;
int64_t m_output_dim{0};
float m_spatial_scale{0};
int64_t m_group_size = 1;
std::string m_mode = "bilinear_deformable";
int64_t m_spatial_bins_x = 1;

View File

@ -35,7 +35,7 @@ public:
}
private:
double m_alpha;
double m_alpha{0};
};
} // namespace v0
} // namespace op

View File

@ -39,7 +39,7 @@ public:
}
private:
size_t m_max_rois;
size_t m_max_rois{0};
template <class T>
friend void shape_infer(ExperimentalDetectronTopKROIs* op,

View File

@ -64,7 +64,7 @@ private:
Shape m_patch_sizes;
Strides m_patch_movement_strides;
Shape m_patch_selection_rates;
PadType m_padding;
PadType m_padding{PadType::EXPLICIT};
template <class T>
friend void shape_infer(const ExtractImagePatches* op,
const std::vector<T>& input_shapes,

View File

@ -33,7 +33,7 @@ public:
}
private:
int64_t m_axis;
int64_t m_axis{0};
template <class T>
void friend shape_infer(const GatherElements* op,
const std::vector<T>& input_shapes,

View File

@ -191,7 +191,7 @@ public:
}
private:
direction m_direction;
direction m_direction{direction::FORWARD};
};
} // namespace v5
} // namespace op

View File

@ -74,7 +74,7 @@ public:
bool has_evaluate() const override;
private:
PadMode m_pad_mode;
PadMode m_pad_mode{PadMode::CONSTANT};
bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
};
} // namespace v1

View File

@ -45,7 +45,7 @@ public:
private:
Shape m_output_size{0, 0};
float m_spatial_scale;
float m_spatial_scale{0};
std::string m_method = "max";
};
} // namespace v0

View File

@ -44,7 +44,7 @@ public:
}
private:
RoundMode m_mode;
RoundMode m_mode{RoundMode::HALF_TO_EVEN};
};
} // namespace v5
} // namespace op

View File

@ -160,7 +160,7 @@ void shape_infer_base(const DetectionOutputBase* op,
if (!num_prior_boxes && box_logits_pshape[1].is_static()) {
auto box_logits_pshape_2nd_dim = box_logits_pshape[1].get_length();
NODE_VALIDATION_CHECK(op,
(box_logits_pshape_2nd_dim % (num_loc_classes * 4)) == 0,
num_loc_classes != 0 && (box_logits_pshape_2nd_dim % (num_loc_classes * 4)) == 0,
"Box logits' second dimension must be a multiply of num_loc_classes * 4 (",
num_loc_classes * 4,
"). Current value is: ",

View File

@ -71,7 +71,7 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
label_to_shape[label].compatible(T{pshape[label_ind]}),
"Different input dimensions indicated by the same labels for Einsum "
"must be compatible.");
T::merge_into(label_to_shape[label], T{pshape[dim_ind]});
OPENVINO_ASSERT(T::merge_into(label_to_shape[label], T{pshape[dim_ind]}));
}
++dim_ind;
}

View File

@ -3,6 +3,7 @@
//
#pragma once
#include <openvino/op/lstm_cell.hpp>
#include "utils.hpp"
namespace ov {
@ -16,7 +17,7 @@ void lstm_shape_infer(const OpsType* op,
using DimType = typename std::iterator_traits<typename ShapeType::iterator>::value_type;
enum { X, initial_hidden_state, initial_cell_state, W, R, B };
std::vector<bool> input_rank_static(6, false);
bool all_rank_dynamic = false;
bool all_rank_dynamic = true;
bool all_rank_static = true;
// Prepare OutShape
auto& hidden_shape = output_shapes[0];
@ -43,7 +44,7 @@ void lstm_shape_infer(const OpsType* op,
bool is_hidden_init = false;
// deduce batch/hidden_size
for (size_t i = 0; i < input_shapes.size() && i < 6 ; i++) {
for (size_t i = 0; i < input_shapes.size() && i < 6; i++) {
const auto& input = input_shapes[i];
if (input_rank_static[i]) {
// batch could be deduced from x, cell_state or hidden_state
@ -94,7 +95,9 @@ void lstm_shape_infer(const OpsType* op,
} else {
NODE_VALIDATION_CHECK(
op,
DimType::merge(output_hidden_size, output_hidden_size, input[0].get_length() / gates_count),
DimType::merge(output_hidden_size,
output_hidden_size,
input[0].get_length() / gates_count),
"Parameter hidden_size not matched for W, R, B, initial_hidden_state and "
"initial_cell_state "
"inputs.");
@ -115,7 +118,9 @@ void lstm_shape_infer(const OpsType* op,
} else {
NODE_VALIDATION_CHECK(
op,
DimType::merge(output_hidden_size, output_hidden_size, input[0].get_length() / gates_count),
DimType::merge(output_hidden_size,
output_hidden_size,
input[0].get_length() / gates_count),
"Parameter hidden_size not matched for W, R, B, initial_hidden_state and "
"initial_cell_state "
"inputs.");
@ -140,9 +145,7 @@ void lstm_shape_infer(const OpsType* op,
// Check peepholes
if (input_shapes.size() == 7) {
const auto& p_pshape = input_shapes[6];
NODE_VALIDATION_CHECK(op,
(p_pshape.rank().compatible(1)),
"LSTMCell input tensor P shall have dimension 1D.");
NODE_VALIDATION_CHECK(op, (p_pshape.rank().compatible(1)), "LSTMCell input tensor P shall have dimension 1D.");
}
// check input size

View File

@ -66,7 +66,7 @@ void infer_prop_shape(const OpType* op,
auto out_dim = DimType{};
if (class_probs_ps.rank().is_static() && bbox_deltas_ps.rank().is_static()) {
DimType::merge(out_dim, class_probs_ps[0], bbox_deltas_ps[0]);
OPENVINO_ASSERT(DimType::merge(out_dim, class_probs_ps[0], bbox_deltas_ps[0]));
} else if (class_probs_ps.rank().is_static()) {
out_dim = class_probs_ps[0];
} else if (bbox_deltas_ps.rank().is_static()) {

View File

@ -62,7 +62,7 @@ void shape_infer(const ov::op::v3::ROIAlign* op, const std::vector<T>& input_sha
// if either of those 2 dimensions is static its value will be used
// for the first dimension of the output shape - 'NUM_ROIS'
if (rois_ps_rank.is_static() && batch_indices_ps_rank.is_static()) {
DimType::merge(output_shape[0], batch_indices_ps[0], rois_ps[0]);
OPENVINO_ASSERT(DimType::merge(output_shape[0], batch_indices_ps[0], rois_ps[0]));
} else if (rois_ps_rank.is_static()) {
output_shape[0] = rois_ps[0];
} else if (batch_indices_ps_rank.is_static()) {

View File

@ -53,7 +53,7 @@ public:
}
// Load plugins until we found the right one
for (auto& plugin : m_plugins) {
plugin.load();
OPENVINO_ASSERT(plugin.load(), "Cannot load frontend ", plugin.get_name_from_file());
if (plugin.get_creator().m_name == framework) {
auto fe_obj = std::make_shared<FrontEnd>();
fe_obj->m_shared_object = plugin.get_so_pointer();