From c9d170043099b2a759fd0dcda740781113f40dc4 Mon Sep 17 00:00:00 2001 From: Mikhail Letavin Date: Tue, 16 Mar 2021 08:28:00 +0300 Subject: [PATCH] [IE CLDNN] Fix missing variable initializations and types (#4670) --- .../src/cldnn_engine/cldnn_remote_context.cpp | 2 +- .../ctc_greedy_decoder_kernel_base.h | 2 +- .../core/actual_kernels/mvn/mvn_kernel_base.h | 6 +++--- .../thirdparty/clDNN/src/layout_optimizer.cpp | 14 +++++++------- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/inference-engine/src/cldnn_engine/cldnn_remote_context.cpp b/inference-engine/src/cldnn_engine/cldnn_remote_context.cpp index 5d8c0bf1916..03b4d6bfc73 100644 --- a/inference-engine/src/cldnn_engine/cldnn_remote_context.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_remote_context.cpp @@ -21,7 +21,7 @@ CLDNNRemoteBlobImpl::CLDNNRemoteBlobImpl(ClContext::Ptr context, uint32_t plane, BlobType mem_type) : m_context(context), m_layout(layout), m_mem_type(mem_type), m_mem(mem), m_surf(surf), m_plane(plane), - _handle(nullptr) { + _handle(nullptr), _allocator(nullptr), m_memObject(nullptr), lockedHolder(nullptr) { } ParamMap CLDNNRemoteBlobImpl::getParams() const { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h index 218216bce32..cf8a11a1d64 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/ctc_greedy_decoder/ctc_greedy_decoder_kernel_base.h @@ -25,7 +25,7 @@ struct ctc_greedy_decoder_params : public base_params { ctc_greedy_decoder_params() : base_params(KernelType::CTC_GREEDY_DECODER) {} bool merge_repeated = true; - uint32_t blank_index; + uint32_t blank_index = 0; uint32_t outputs_num = 1; }; diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h index f7c37d82674..5ca0e8fe5c4 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/mvn/mvn_kernel_base.h @@ -27,9 +27,9 @@ struct mvn_params : public base_params { mvn_params() : base_params(KernelType::MVN) {} MVNMode mvnMode = MVNMode::WITHIN_CHANNELS; - bool mvnNormalizeVariance; - float epsilon; - MVNEpsMode mvnEpsMode; + bool mvnNormalizeVariance = false; + float epsilon = 0.0f; + MVNEpsMode mvnEpsMode = MVNEpsMode::INSIDE_SQRT; virtual ParamsKey GetParamsKey() const { ParamsKey k = base_params::GetParamsKey(); diff --git a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp index a73f5993cbb..6c843c513bd 100644 --- a/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp +++ b/inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp @@ -395,19 +395,19 @@ bool layout_optimizer::convolution_b_fs_yx_fsv16_opt(const layout& input_layout, } // A set of rules that define when b_fs_yx_fsv16 mem format can be used for fp16/fp32 case int32_t feature_block_size = 16; - int32_t correct_data_type = input_layout.data_type == data_types::f16 || input_layout.data_type == data_types::f32; - correct_data_type &= weights_layout.data_type == input_layout.data_type; - int32_t correct_batch = (input_layout.size.batch[0] == 1) || (input_layout.size.batch[0] > 1 && input_layout.data_type == data_types::f32); - int32_t correct_spatial_dims = input_layout.size.spatial[2] == 1 && input_layout.size.spatial[3] == 1; + bool correct_data_type = (input_layout.data_type == data_types::f16 || input_layout.data_type == data_types::f32) && + (weights_layout.data_type == input_layout.data_type); + bool 
correct_batch = (input_layout.size.batch[0] == 1) || (input_layout.size.batch[0] > 1 && input_layout.data_type == data_types::f32); + bool correct_spatial_dims = input_layout.size.spatial[2] == 1 && input_layout.size.spatial[3] == 1; int32_t required_feature_num = weak_restrictions ? feature_block_size / 2 : feature_block_size; - int32_t correct_in_feature = (input_layout.size.feature[0] >= required_feature_num && + bool correct_in_feature = (input_layout.size.feature[0] >= required_feature_num && output_layout.size.feature[0] >= required_feature_num); int32_t in_features_per_group = input_layout.size.feature[0] / conv->groups; int32_t out_features_per_group = output_layout.size.feature[0] / conv->groups; if (!correct_in_feature && input_layout.size.feature[0] <= 4 && out_features_per_group >= feature_block_size) correct_in_feature = true; - int32_t depthwise = conv->groups == static_cast<uint32_t>(input_layout.size.feature[0]); // depthwise conv - int32_t grouped = ((feature_block_size % out_features_per_group == 0) && + bool depthwise = conv->groups == static_cast<uint32_t>(input_layout.size.feature[0]); // depthwise conv + bool grouped = ((feature_block_size % out_features_per_group == 0) && (feature_block_size % in_features_per_group == 0) && (feature_block_size / out_features_per_group > 1) && (feature_block_size / in_features_per_group > 1) &&