Coverity: fix "Uninitialized scalar variable" warnings (#17182)

Signed-off-by: Hu Yuan2 <yuan2.hu@intel.com>
Author: Yuan Hu
Date: 2023-04-27 03:49:21 +08:00 (committed by GitHub)
Commit: cecd0e75a6 (parent dbaa1f0c0d)
8 changed files with 45 additions and 44 deletions
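
Coverity's "Uninitialized scalar variable" checker flags class members that no constructor writes before they can be read; in C++, reading such an indeterminate value is undefined behavior. Every change below applies the same remedy: a C++11 default member initializer, which gives the field a deterministic value on every construction path. A minimal before/after sketch of the pattern (BrokenCtx and FixedCtx are hypothetical names, not types from this patch):

    #include <cstddef>
    #include <iostream>

    // Before: scalar members are left indeterminate by the implicit
    // constructor; reading them prior to assignment is undefined behavior.
    struct BrokenCtx {
        std::size_t M, N;
        float beta;
    };

    // After: default member initializers guarantee a well-defined value
    // no matter which constructor runs (the fix used across this commit).
    struct FixedCtx {
        std::size_t M = 0, N = 0;
        float beta = 0.0f;
    };

    int main() {
        FixedCtx fixed;                                     // deterministically zeroed
        std::cout << fixed.M << ' ' << fixed.beta << '\n';  // always prints "0 0"
        return 0;
    }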

@@ -167,7 +167,7 @@ private:
     MemoryPtr legacyWeightsZeroPointsMemPtr;
     MemoryPtr legacyOutputCompensationMemPtr;
     MemoryPtr stockInputZeroPointsMemPtr;
-    dnnl::memory::data_type outputDataType;
+    dnnl::memory::data_type outputDataType = dnnl::memory::data_type::undef;
     InferenceEngine::Precision sumPrc = InferenceEngine::Precision::UNSPECIFIED;
     // TODO: migrate on convolution_auto algorithm for x64

@@ -89,8 +89,8 @@ private:
     bool autoPad = false;
     bool externOutShape = false;
     size_t groupNum = 1;
-    size_t IC;
-    size_t OC;
+    size_t IC = 0;
+    size_t OC = 0;
     std::vector<ptrdiff_t> kernel;
     std::vector<ptrdiff_t> stride;
     std::vector<ptrdiff_t> dilation;
@@ -105,7 +105,7 @@ private:
     AttrPtr pAttr;
-    dnnl::memory::data_type outputDataType;
+    dnnl::memory::data_type outputDataType = dnnl::memory::data_type::undef;
     std::shared_ptr<dnnl::primitive_attr> attr;
     void setPostOps(dnnl::primitive_attr &attr, const VectorDims &dims);

@@ -78,12 +78,12 @@ private:
     static const size_t DATA_ID = 0;
     static const size_t WEIGHTS_ID = 1;
     static const size_t BIAS_ID = 2;
-    dnnl::memory::data_type outputDataType;
+    dnnl::memory::data_type outputDataType = dnnl::memory::data_type::undef;
     using executorPtr = std::shared_ptr<DnnlExecutor>;
     executorPtr execPtr = nullptr;
     bool useConv1x1 = false;
-    impl_desc_type implementationTypeIP;
+    impl_desc_type implementationTypeIP = impl_desc_type::unknown;
     MemoryDescPtr weightDescIP;
     dnnl::primitive_attr attr;

@@ -228,7 +228,7 @@ private:
     static size_t getSpatialDimsNum(const Dim rank);
     bool hasPad = false;
-    InterpolateShapeCalcMode shapeCalcMode;
+    InterpolateShapeCalcMode shapeCalcMode = InterpolateShapeCalcMode::sizes;
     bool isAxesSpecified = false;
     std::vector<int> axes;

@@ -35,7 +35,7 @@ struct jit_dft_args {
 struct jit_dft_kernel {
     jit_dft_kernel(bool is_inverse, enum dft_type type) : is_inverse_(is_inverse), kernel_type_(type) {}
-    void (*ker_)(const jit_dft_args*);
+    void (*ker_)(const jit_dft_args*) = nullptr;
     void operator()(const jit_dft_args* args) {
         assert(ker_);
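
The jit_dft_kernel hunk above applies the same idea to a function pointer: defaulting ker_ to nullptr turns a call through a never-generated kernel from undefined behavior into a clean assertion failure. A minimal sketch of that guard, where args_t, kernel_t, and kernel_stub are illustrative stand-ins, not the patched types:

    #include <cassert>

    struct args_t { int n; };

    // Mirrors the patched pattern: the JIT'ed entry point is a function
    // pointer, defaulted to nullptr so use-before-generation hits the assert.
    struct kernel_t {
        void (*ker_)(const args_t*) = nullptr;

        void operator()(const args_t* args) {
            assert(ker_ && "kernel code was never generated");
            ker_(args);
        }
    };

    void kernel_stub(const args_t*) {}  // hypothetical stand-in for emitted code

    int main() {
        kernel_t k;
        k.ker_ = kernel_stub;  // normally assigned after code generation
        args_t a{8};
        k(&a);                 // safe: ker_ is non-null
        return 0;
    }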

@@ -142,12 +142,13 @@ protected:
 private:
     struct brgemmCtx {
-        size_t M, N, K, LDA, LDB, LDC;
-        dnnl_data_type_t dt_in0, dt_in1;
+        size_t M = 0, N = 0, K = 0, LDA = 0, LDB = 0, LDC = 0;
+        dnnl_data_type_t dt_in0 = dnnl_data_type_undef;
+        dnnl_data_type_t dt_in1 = dnnl_data_type_undef;
         char palette[64];
-        bool is_with_amx;
-        bool is_with_comp;
-        float beta;
+        bool is_with_amx = false;
+        bool is_with_comp = false;
+        float beta = 0.0f;
     };
     template <typename in1_type>
@@ -190,18 +191,18 @@ private:
     VectorDims dimsMatMul1In1;
     VectorDims dimsMatMul1Out;
-    size_t batch0, batch1;
-    size_t M, M_blk, M_tail;
-    size_t K0, K0_blk, K0_tail, N0, N0_blk, N0_tail;
-    size_t K1, K1_blk, K1_tail, N1, N1_blk, N1_tail;
+    size_t batch0 = 0, batch1 = 0;
+    size_t M = 0, M_blk = 0, M_tail = 0;
+    size_t K0 = 0, K0_blk = 0, K0_tail = 0, N0 = 0, N0_blk = 0, N0_tail = 0;
+    size_t K1 = 0, K1_blk = 0, K1_tail = 0, N1 = 0, N1_blk = 0, N1_tail = 0;
-    size_t bufferMatMul0In0Size;
-    size_t bufferMatMul0In1Size;
-    size_t bufferMatMul0OutSize;
-    size_t bufferMatMul1In1Size;
-    size_t bufferMatMul1OutSize;
-    size_t bufferCompensation0Size;
-    size_t bufferCompensation1Size;
+    size_t bufferMatMul0In0Size = 0;
+    size_t bufferMatMul0In1Size = 0;
+    size_t bufferMatMul0OutSize = 0;
+    size_t bufferMatMul1In1Size = 0;
+    size_t bufferMatMul1OutSize = 0;
+    size_t bufferCompensation0Size = 0;
+    size_t bufferCompensation1Size = 0;
     size_t wsp_size_per_thread = 4 * 1024;
     std::vector<uint8_t> bufferMatMul0In0;
@@ -222,13 +223,13 @@ private:
     std::vector<float> fqScales2;
     std::vector<float> fqScales3;
-    size_t brg0VnniFactor;
+    size_t brg0VnniFactor = 0;
     brgemmCtx brgCtxs0[MHA_BRGEMM_KERNELS_NUM];
     std::unique_ptr<dnnl::impl::cpu::x64::brgemm_kernel_t> brgKernels0[MHA_BRGEMM_KERNELS_NUM];
     std::unique_ptr<dnnl::impl::cpu::x64::matmul::jit_brgemm_matmul_copy_a_t> brgCopyAKernel0;
     std::unique_ptr<dnnl::impl::cpu::x64::matmul::jit_brgemm_matmul_copy_b_t> brgCopyBKernel0;
-    size_t brg1VnniFactor;
+    size_t brg1VnniFactor = 0;
     brgemmCtx brgCtxs1[MHA_BRGEMM_KERNELS_NUM];
     std::unique_ptr<dnnl::impl::cpu::x64::brgemm_kernel_t> brgKernels1[MHA_BRGEMM_KERNELS_NUM];
     std::unique_ptr<dnnl::impl::cpu::x64::matmul::jit_brgemm_matmul_copy_b_t> brgCopyBKernel1;

@@ -78,7 +78,7 @@ private:
     // Holds ISA version used is codeGeneration target
     dnnl::impl::cpu::x64::cpu_isa_t host_isa;
-    size_t isa_num_lanes; // number of elements that fit in vector size
+    size_t isa_num_lanes = 0; // number of elements that fit in vector size
     // Holds index of output used as in execution domain
     // it should be compatible with a schedule's work size

@@ -113,27 +113,27 @@ private:
     void preset_params();
     void prepare_original_idx();
-    bool topk_innermost;
-    bool jit_mode;
-    bool sort_index;
-    bool stable;
-    bool mode_max;
-    int axis;
+    bool topk_innermost = false;
+    bool jit_mode = false;
+    bool sort_index = false;
+    bool stable = false;
+    bool mode_max = false;
+    int axis = 0;
     static const size_t TOPK_DATA = 0;
     static const size_t TOPK_K = 1;
     static const size_t TOPK_INDEX = 1;
-    size_t O, A, I;
-    size_t blk_size;
-    size_t data_size;
-    size_t axis_dim;
-    int top_k;
-    int dim, before_num;
-    bool bubble_inplace;
-    bool preset_params_done;
+    size_t O = 0, A = 0, I = 0;
+    size_t blk_size = 0;
+    size_t data_size = 0;
+    size_t axis_dim = 0;
+    int top_k = 0;
+    int dim = 0, before_num = 0;
+    bool bubble_inplace = false;
+    bool preset_params_done = false;
     VectorDims src_dims, dst_dims;
-    TopKLayoutType layout;
-    TopKAlgorithm algorithm;
+    TopKLayoutType layout = TopKLayoutType::topk_ncsp;
+    TopKAlgorithm algorithm = TopKAlgorithm::topk_bubble_sort;
     std::vector<int> vec_bitonic_idx;
     std::vector<int> vec_bitonic_k_idx;
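
Note that the enum members above (shapeCalcMode, layout, algorithm) are defaulted to a real enumerator rather than zero: an enum class has no implicit "unset" state, so the patch picks a valid placeholder that later setup code can overwrite. A small sketch of that choice, where LayoutKind and TopKConfig are hypothetical names, not types from the patch:

    // An enum class has no universal null value, so the deterministic
    // default must be one of its own enumerators.
    enum class LayoutKind { ncsp, nspc, blocked };

    struct TopKConfig {
        LayoutKind layout = LayoutKind::ncsp;  // valid, deterministic default
        bool stable = false;
        int axis = 0;
    };

    int main() {
        TopKConfig cfg;  // every field is well-defined before any setup runs
        return cfg.layout == LayoutKind::ncsp ? 0 : 1;
    }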