[IE CLDNN] Fix missing variable initializations and types (#4669)

This commit is contained in:
Mikhail Letavin
2021-03-10 16:23:10 +03:00
committed by GitHub
parent 3a80652d70
commit c6d8905a88
4 changed files with 12 additions and 12 deletions

View File

@@ -21,7 +21,7 @@ CLDNNRemoteBlobImpl::CLDNNRemoteBlobImpl(ClContext::Ptr context,
uint32_t plane,
BlobType mem_type) :
m_context(context), m_layout(layout), m_mem_type(mem_type), m_mem(mem), m_surf(surf), m_plane(plane),
-        _handle(nullptr) {
+        _handle(nullptr), _allocator(nullptr), m_memObject(nullptr), lockedHolder(nullptr) {
}
ParamMap CLDNNRemoteBlobImpl::getParams() const {

View File

@@ -25,7 +25,7 @@ struct ctc_greedy_decoder_params : public base_params {
ctc_greedy_decoder_params() : base_params(KernelType::CTC_GREEDY_DECODER) {}
bool merge_repeated = true;
-    uint32_t blank_index;
+    uint32_t blank_index = 0;
uint32_t outputs_num = 1;
};

View File

@@ -27,9 +27,9 @@ struct mvn_params : public base_params {
mvn_params() : base_params(KernelType::MVN) {}
MVNMode mvnMode = MVNMode::WITHIN_CHANNELS;
-    bool mvnNormalizeVariance;
-    float epsilon;
-    MVNEpsMode mvnEpsMode;
+    bool mvnNormalizeVariance = false;
+    float epsilon = 0.0f;
+    MVNEpsMode mvnEpsMode = MVNEpsMode::INSIDE_SQRT;
virtual ParamsKey GetParamsKey() const {
ParamsKey k = base_params::GetParamsKey();

View File

@@ -393,19 +393,19 @@ bool layout_optimizer::convolution_b_fs_yx_fsv16_opt(const layout& input_layout,
}
// A set of rules that define when b_fs_yx_fsv16 mem format can be used for fp16/fp32 case
int32_t feature_block_size = 16;
-    int32_t correct_data_type = input_layout.data_type == data_types::f16 || input_layout.data_type == data_types::f32;
-    correct_data_type &= weights_layout.data_type == input_layout.data_type;
-    int32_t correct_batch = (input_layout.size.batch[0] == 1) || (input_layout.size.batch[0] > 1 && input_layout.data_type == data_types::f32);
-    int32_t correct_spatial_dims = input_layout.size.spatial[2] == 1 && input_layout.size.spatial[3] == 1;
+    bool correct_data_type = (input_layout.data_type == data_types::f16 || input_layout.data_type == data_types::f32) &&
+                             (weights_layout.data_type == input_layout.data_type);
+    bool correct_batch = (input_layout.size.batch[0] == 1) || (input_layout.size.batch[0] > 1 && input_layout.data_type == data_types::f32);
+    bool correct_spatial_dims = input_layout.size.spatial[2] == 1 && input_layout.size.spatial[3] == 1;
int32_t required_feature_num = weak_restrictions ? feature_block_size / 2 : feature_block_size;
-    int32_t correct_in_feature = (input_layout.size.feature[0] >= required_feature_num &&
+    bool correct_in_feature = (input_layout.size.feature[0] >= required_feature_num &&
output_layout.size.feature[0] >= required_feature_num);
int32_t in_features_per_group = input_layout.size.feature[0] / conv->groups;
int32_t out_features_per_group = output_layout.size.feature[0] / conv->groups;
if (!correct_in_feature && input_layout.size.feature[0] <= 4 && out_features_per_group >= feature_block_size)
correct_in_feature = true;
-    int32_t depthwise = conv->groups == static_cast<uint32_t>(input_layout.size.feature[0]);  // depthwise conv
-    int32_t grouped = ((feature_block_size % out_features_per_group == 0) &&
+    bool depthwise = conv->groups == static_cast<uint32_t>(input_layout.size.feature[0]);  // depthwise conv
+    bool grouped = ((feature_block_size % out_features_per_group == 0) &&
(feature_block_size % in_features_per_group == 0) &&
(feature_block_size / out_features_per_group > 1) &&
(feature_block_size / in_features_per_group > 1) &&