Fixed Coverity issues for core components (#9558)

* Fixed Coverity issues for core components

* Fixed some tests
This commit is contained in:
Ilya Churaev 2022-01-11 17:28:50 +03:00 committed by GitHub
parent a49f1b3bc6
commit 02cabcda3c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 34 additions and 31 deletions

View File

@ -48,7 +48,7 @@ private:
static constexpr size_t INPUT_MEAN = 3; static constexpr size_t INPUT_MEAN = 3;
static constexpr size_t INPUT_VARIANCE = 4; static constexpr size_t INPUT_VARIANCE = 4;
double m_epsilon; double m_epsilon{0};
}; };
} // namespace v0 } // namespace v0
namespace v5 { namespace v5 {
@ -89,7 +89,7 @@ private:
static constexpr size_t INPUT_MEAN = 3; static constexpr size_t INPUT_MEAN = 3;
static constexpr size_t INPUT_VARIANCE = 4; static constexpr size_t INPUT_VARIANCE = 4;
double m_epsilon; double m_epsilon{0};
}; };
} // namespace v5 } // namespace v5
} // namespace op } // namespace op

View File

@ -51,7 +51,7 @@ public:
private: private:
element::Type m_output_type; element::Type m_output_type;
bool m_with_right_bound; bool m_with_right_bound{true};
}; };
} // namespace v3 } // namespace v3
} // namespace op } // namespace op

View File

@ -709,7 +709,7 @@ private:
element::Type m_element_type; element::Type m_element_type;
Shape m_shape{}; Shape m_shape{};
std::shared_ptr<ngraph::runtime::AlignedBuffer> m_data; std::shared_ptr<ngraph::runtime::AlignedBuffer> m_data;
bool m_all_elements_bitwise_identical; bool m_all_elements_bitwise_identical = false;
bool m_alloc_buffer_on_visit_attributes = true; bool m_alloc_buffer_on_visit_attributes = true;
}; };
} // namespace v0 } // namespace v0

View File

@ -31,7 +31,7 @@ public:
} }
private: private:
bool m_ctc_merge_repeated; bool m_ctc_merge_repeated{true};
}; };
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -92,7 +92,7 @@ public:
} }
private: private:
bool m_merge_repeated; bool m_merge_repeated{true};
element::Type m_classes_index_type{element::i32}; element::Type m_classes_index_type{element::i32};
element::Type m_sequence_length_type{element::i32}; element::Type m_sequence_length_type{element::i32};
}; };

View File

@ -166,7 +166,7 @@ public:
} }
private: private:
bool m_bilinear_interpolation_pad; bool m_bilinear_interpolation_pad{false};
}; };
} // namespace v8 } // namespace v8
} // namespace op } // namespace op

View File

@ -94,8 +94,8 @@ public:
} }
private: private:
int64_t m_output_dim; int64_t m_output_dim{0};
float m_spatial_scale; float m_spatial_scale{0};
int64_t m_group_size = 1; int64_t m_group_size = 1;
std::string m_mode = "bilinear_deformable"; std::string m_mode = "bilinear_deformable";
int64_t m_spatial_bins_x = 1; int64_t m_spatial_bins_x = 1;

View File

@ -35,7 +35,7 @@ public:
} }
private: private:
double m_alpha; double m_alpha{0};
}; };
} // namespace v0 } // namespace v0
} // namespace op } // namespace op

View File

@ -39,7 +39,7 @@ public:
} }
private: private:
size_t m_max_rois; size_t m_max_rois{0};
template <class T> template <class T>
friend void shape_infer(ExperimentalDetectronTopKROIs* op, friend void shape_infer(ExperimentalDetectronTopKROIs* op,

View File

@ -64,7 +64,7 @@ private:
Shape m_patch_sizes; Shape m_patch_sizes;
Strides m_patch_movement_strides; Strides m_patch_movement_strides;
Shape m_patch_selection_rates; Shape m_patch_selection_rates;
PadType m_padding; PadType m_padding{PadType::EXPLICIT};
template <class T> template <class T>
friend void shape_infer(const ExtractImagePatches* op, friend void shape_infer(const ExtractImagePatches* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,

View File

@ -33,7 +33,7 @@ public:
} }
private: private:
int64_t m_axis; int64_t m_axis{0};
template <class T> template <class T>
void friend shape_infer(const GatherElements* op, void friend shape_infer(const GatherElements* op,
const std::vector<T>& input_shapes, const std::vector<T>& input_shapes,

View File

@ -191,7 +191,7 @@ public:
} }
private: private:
direction m_direction; direction m_direction{direction::FORWARD};
}; };
} // namespace v5 } // namespace v5
} // namespace op } // namespace op

View File

@ -74,7 +74,7 @@ public:
bool has_evaluate() const override; bool has_evaluate() const override;
private: private:
PadMode m_pad_mode; PadMode m_pad_mode{PadMode::CONSTANT};
bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const; bool evaluate_pad(const HostTensorVector& outputs, const HostTensorVector& inputs) const;
}; };
} // namespace v1 } // namespace v1

View File

@ -45,7 +45,7 @@ public:
private: private:
Shape m_output_size{0, 0}; Shape m_output_size{0, 0};
float m_spatial_scale; float m_spatial_scale{0};
std::string m_method = "max"; std::string m_method = "max";
}; };
} // namespace v0 } // namespace v0

View File

@ -44,7 +44,7 @@ public:
} }
private: private:
RoundMode m_mode; RoundMode m_mode{RoundMode::HALF_TO_EVEN};
}; };
} // namespace v5 } // namespace v5
} // namespace op } // namespace op

View File

@ -160,7 +160,7 @@ void shape_infer_base(const DetectionOutputBase* op,
if (!num_prior_boxes && box_logits_pshape[1].is_static()) { if (!num_prior_boxes && box_logits_pshape[1].is_static()) {
auto box_logits_pshape_2nd_dim = box_logits_pshape[1].get_length(); auto box_logits_pshape_2nd_dim = box_logits_pshape[1].get_length();
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op,
(box_logits_pshape_2nd_dim % (num_loc_classes * 4)) == 0, num_loc_classes != 0 && (box_logits_pshape_2nd_dim % (num_loc_classes * 4)) == 0,
"Box logits' second dimension must be a multiply of num_loc_classes * 4 (", "Box logits' second dimension must be a multiply of num_loc_classes * 4 (",
num_loc_classes * 4, num_loc_classes * 4,
"). Current value is: ", "). Current value is: ",
@ -340,4 +340,4 @@ void shape_infer(const DetectionOutput* op, const std::vector<T>& input_shapes,
} // namespace v8 } // namespace v8
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -71,7 +71,7 @@ void shape_infer(const Einsum* op, const std::vector<T>& input_shapes, std::vect
label_to_shape[label].compatible(T{pshape[label_ind]}), label_to_shape[label].compatible(T{pshape[label_ind]}),
"Different input dimensions indicated by the same labels for Einsum " "Different input dimensions indicated by the same labels for Einsum "
"must be compatible."); "must be compatible.");
T::merge_into(label_to_shape[label], T{pshape[dim_ind]}); OPENVINO_ASSERT(T::merge_into(label_to_shape[label], T{pshape[dim_ind]}));
} }
++dim_ind; ++dim_ind;
} }

View File

@ -3,6 +3,7 @@
// //
#pragma once #pragma once
#include <openvino/op/lstm_cell.hpp> #include <openvino/op/lstm_cell.hpp>
#include "utils.hpp" #include "utils.hpp"
namespace ov { namespace ov {
@ -16,7 +17,7 @@ void lstm_shape_infer(const OpsType* op,
using DimType = typename std::iterator_traits<typename ShapeType::iterator>::value_type; using DimType = typename std::iterator_traits<typename ShapeType::iterator>::value_type;
enum { X, initial_hidden_state, initial_cell_state, W, R, B }; enum { X, initial_hidden_state, initial_cell_state, W, R, B };
std::vector<bool> input_rank_static(6, false); std::vector<bool> input_rank_static(6, false);
bool all_rank_dynamic = false; bool all_rank_dynamic = true;
bool all_rank_static = true; bool all_rank_static = true;
// Prepare OutShape // Prepare OutShape
auto& hidden_shape = output_shapes[0]; auto& hidden_shape = output_shapes[0];
@ -43,7 +44,7 @@ void lstm_shape_infer(const OpsType* op,
bool is_hidden_init = false; bool is_hidden_init = false;
// deduce batch/hidden_size // deduce batch/hidden_size
for (size_t i = 0; i < input_shapes.size() && i < 6 ; i++) { for (size_t i = 0; i < input_shapes.size() && i < 6; i++) {
const auto& input = input_shapes[i]; const auto& input = input_shapes[i];
if (input_rank_static[i]) { if (input_rank_static[i]) {
// batch could be deduced from x, cell_state or hidden_state // batch could be deduced from x, cell_state or hidden_state
@ -94,7 +95,9 @@ void lstm_shape_infer(const OpsType* op,
} else { } else {
NODE_VALIDATION_CHECK( NODE_VALIDATION_CHECK(
op, op,
DimType::merge(output_hidden_size, output_hidden_size, input[0].get_length() / gates_count), DimType::merge(output_hidden_size,
output_hidden_size,
input[0].get_length() / gates_count),
"Parameter hidden_size not matched for W, R, B, initial_hidden_state and " "Parameter hidden_size not matched for W, R, B, initial_hidden_state and "
"initial_cell_state " "initial_cell_state "
"inputs."); "inputs.");
@ -115,7 +118,9 @@ void lstm_shape_infer(const OpsType* op,
} else { } else {
NODE_VALIDATION_CHECK( NODE_VALIDATION_CHECK(
op, op,
DimType::merge(output_hidden_size, output_hidden_size, input[0].get_length() / gates_count), DimType::merge(output_hidden_size,
output_hidden_size,
input[0].get_length() / gates_count),
"Parameter hidden_size not matched for W, R, B, initial_hidden_state and " "Parameter hidden_size not matched for W, R, B, initial_hidden_state and "
"initial_cell_state " "initial_cell_state "
"inputs."); "inputs.");
@ -140,9 +145,7 @@ void lstm_shape_infer(const OpsType* op,
// Check peepholes // Check peepholes
if (input_shapes.size() == 7) { if (input_shapes.size() == 7) {
const auto& p_pshape = input_shapes[6]; const auto& p_pshape = input_shapes[6];
NODE_VALIDATION_CHECK(op, NODE_VALIDATION_CHECK(op, (p_pshape.rank().compatible(1)), "LSTMCell input tensor P shall have dimension 1D.");
(p_pshape.rank().compatible(1)),
"LSTMCell input tensor P shall have dimension 1D.");
} }
// check input size // check input size

View File

@ -66,7 +66,7 @@ void infer_prop_shape(const OpType* op,
auto out_dim = DimType{}; auto out_dim = DimType{};
if (class_probs_ps.rank().is_static() && bbox_deltas_ps.rank().is_static()) { if (class_probs_ps.rank().is_static() && bbox_deltas_ps.rank().is_static()) {
DimType::merge(out_dim, class_probs_ps[0], bbox_deltas_ps[0]); OPENVINO_ASSERT(DimType::merge(out_dim, class_probs_ps[0], bbox_deltas_ps[0]));
} else if (class_probs_ps.rank().is_static()) { } else if (class_probs_ps.rank().is_static()) {
out_dim = class_probs_ps[0]; out_dim = class_probs_ps[0];
} else if (bbox_deltas_ps.rank().is_static()) { } else if (bbox_deltas_ps.rank().is_static()) {

View File

@ -62,7 +62,7 @@ void shape_infer(const ov::op::v3::ROIAlign* op, const std::vector<T>& input_sha
// if either of those 2 dimensions is static its value will be used // if either of those 2 dimensions is static its value will be used
// for the first dimension of the output shape - 'NUM_ROIS' // for the first dimension of the output shape - 'NUM_ROIS'
if (rois_ps_rank.is_static() && batch_indices_ps_rank.is_static()) { if (rois_ps_rank.is_static() && batch_indices_ps_rank.is_static()) {
DimType::merge(output_shape[0], batch_indices_ps[0], rois_ps[0]); OPENVINO_ASSERT(DimType::merge(output_shape[0], batch_indices_ps[0], rois_ps[0]));
} else if (rois_ps_rank.is_static()) { } else if (rois_ps_rank.is_static()) {
output_shape[0] = rois_ps[0]; output_shape[0] = rois_ps[0];
} else if (batch_indices_ps_rank.is_static()) { } else if (batch_indices_ps_rank.is_static()) {
@ -74,4 +74,4 @@ void shape_infer(const ov::op::v3::ROIAlign* op, const std::vector<T>& input_sha
} // namespace v3 } // namespace v3
} // namespace op } // namespace op
} // namespace ov } // namespace ov

View File

@ -53,7 +53,7 @@ public:
} }
// Load plugins until we found the right one // Load plugins until we found the right one
for (auto& plugin : m_plugins) { for (auto& plugin : m_plugins) {
plugin.load(); OPENVINO_ASSERT(plugin.load(), "Cannot load frontend ", plugin.get_name_from_file());
if (plugin.get_creator().m_name == framework) { if (plugin.get_creator().m_name == framework) {
auto fe_obj = std::make_shared<FrontEnd>(); auto fe_obj = std::make_shared<FrontEnd>();
fe_obj->m_shared_object = plugin.get_so_pointer(); fe_obj->m_shared_object = plugin.get_so_pointer();