[CPU] Statically analyzed issues. (#4636)
parent 3e22f33224
commit 85e493ecb9
@@ -91,7 +91,7 @@ TensorDescCreator::makeFilteredRange(const CreatorsMap &map, unsigned int rank)
 
 std::pair<CreatorsMapFilterConstIterator, CreatorsMapFilterConstIterator>
 TensorDescCreator::makeFilteredRange(const CreatorsMap& map, unsigned rank, const std::vector<TensorDescCreatorTypes>& supportedTypes) {
-    size_t bitMask = 0ul;
+    unsigned bitMask = 0ul;
     for (auto& item : supportedTypes) {
         bitMask |= 1 << static_cast<unsigned>(item);
     }

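The change above narrows the accumulator used to build a per-type bitmask from the list of supported creator types, presumably so its width matches the int result of `1 << …` that is OR-ed into it. A minimal self-contained sketch of the same filtering idea, with an illustrative enum and helper that are not part of the plugin:

    #include <vector>

    // Illustrative stand-in; the plugin's real enum is TensorDescCreatorTypes.
    enum class LayoutKind : unsigned { plain = 0, blocked8 = 1, blocked16 = 2, perChannel = 3 };

    bool isSupported(LayoutKind candidate, const std::vector<LayoutKind>& supported) {
        unsigned bitMask = 0u;                              // same width as the '1 << ...' below
        for (auto kind : supported)
            bitMask |= 1u << static_cast<unsigned>(kind);   // set one bit per supported layout
        return (bitMask & (1u << static_cast<unsigned>(candidate))) != 0;
    }

    int main() {
        return isSupported(LayoutKind::plain, {LayoutKind::plain, LayoutKind::blocked16}) ? 0 : 1;
    }
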
@@ -205,7 +205,7 @@ private:
     void parallelItInit(size_t start, std::vector<size_t>& counters, const std::vector<size_t>& iterationRange) {
         auto itCounter = counters.rbegin();
         auto itWork = iterationRange.rbegin();
-        while (itCounter != counters.rend()) {
+        while (itCounter != counters.rend() && itWork != iterationRange.rend()) {
             *itCounter = start % *itWork;
             start /= *itWork;
             ++itCounter;

@@ -217,7 +217,7 @@ private:
         auto itCounter = counters.rbegin();
         auto itWork = iterationRange.rbegin();
 
-        while (itCounter != counters.rend()) {
+        while (itCounter != counters.rend() && itWork != iterationRange.rend()) {
             *itCounter = (*itCounter + 1) % *itWork;
             if (*itCounter != 0) {
                 break;

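The two hunks above touch a pair of helpers that map a flat work-item index to per-dimension counters and then advance those counters with carry; the fix simply bounds each loop by both reverse iterators so a size mismatch between `counters` and `iterationRange` cannot walk past the end. A standalone sketch of the same mixed-radix logic (free functions with illustrative names; the `++itWork` steps are implied by the original loops but not visible in the hunks):

    #include <cstddef>
    #include <vector>

    // Decompose a flat index into per-dimension counters, rightmost dimension fastest.
    void initCounters(std::size_t start, std::vector<std::size_t>& counters, const std::vector<std::size_t>& range) {
        auto itCounter = counters.rbegin();
        auto itWork = range.rbegin();
        while (itCounter != counters.rend() && itWork != range.rend()) {   // bound by both iterators
            *itCounter = start % *itWork;
            start /= *itWork;
            ++itCounter;
            ++itWork;
        }
    }

    // Advance the counters by one work item, carrying into slower dimensions as needed.
    void stepCounters(std::vector<std::size_t>& counters, const std::vector<std::size_t>& range) {
        auto itCounter = counters.rbegin();
        auto itWork = range.rbegin();
        while (itCounter != counters.rend() && itWork != range.rend()) {
            *itCounter = (*itCounter + 1) % *itWork;
            if (*itCounter != 0)
                break;              // no carry out of this digit
            ++itCounter;
            ++itWork;
        }
    }

    int main() {
        std::vector<std::size_t> counters(3), range = {2, 3, 4};  // 2x3x4 iteration space
        initCounters(5, counters, range);                         // counters == {0, 1, 1}
        stepCounters(counters, range);                            // counters == {0, 1, 2}
        return 0;
    }
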
@@ -991,13 +991,17 @@ void MKLDNNBinaryConvolutionNode::initSupportedPrimitiveDescriptors() {
 }
 
 void MKLDNNBinaryConvolutionNode::createPrimitive() {
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU binary convolution with name '" << getName() << "' doesn't have primitive descriptors.";
+
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     auto srcDims = config.inConfs[0].desc.getDims();
     auto weiDims = config.inConfs[1].desc.getDims();
     auto dstDims = config.outConfs[0].desc.getDims();
 
-    auto implType = getSelectedPrimitiveDescriptor()->getImplementationType();
+    auto implType = selectedPrimitiveDescriptor->getImplementationType();
 
     jcp.ngroups = group;
     jcp.mb = srcDims[0];

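This hunk establishes the pattern repeated in most of the changes below: rather than dereferencing the result of `getSelectedPrimitiveDescriptor()` unchecked, which the analyzer flags as a possible null-pointer dereference, the pointer is fetched once, validated, and then reused for `getConfig()` and `getImplementationType()`. A simplified, self-contained sketch of the guard, with a made-up `Node` type and `std::runtime_error` standing in for `THROW_IE_EXCEPTION`:

    #include <sstream>
    #include <stdexcept>
    #include <string>

    struct PrimitiveDescriptor { /* holds the selected config */ };

    struct Node {
        std::string name;
        PrimitiveDescriptor* selected = nullptr;   // may legitimately be null before selection

        const PrimitiveDescriptor* getSelectedPrimitiveDescriptor() const { return selected; }

        void createPrimitive() {
            // Fetch once, check once, then reuse the local pointer instead of calling the getter again.
            auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
            if (!selectedPrimitiveDescriptor) {
                std::ostringstream msg;
                msg << "CPU node with name '" << name << "' doesn't have primitive descriptors.";
                throw std::runtime_error(msg.str());
            }
            // ... from here on selectedPrimitiveDescriptor can be dereferenced safely ...
        }
    };

    int main() {
        Node node{"conv1"};
        try { node.createPrimitive(); } catch (const std::exception&) { return 0; }  // throw is the expected path here
        return 1;
    }
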
@@ -1295,7 +1299,11 @@ void MKLDNNBinaryConvolutionNode::execute(mkldnn::stream strm) {
     auto weights = reinterpret_cast<const uint8_t*>(weightsMemory->GetPtr());
     auto dst = reinterpret_cast<uint8_t*>(dstMemory->GetPtr());
 
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU binary convolution with name '" << getName() << "' doesn't have primitive descriptors.";
+
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     auto srcBlockDesc = config.inConfs[0].desc.getBlockingDesc();
     std::vector<size_t> srcStride(srcBlockDesc.getStrides().size());

@@ -1315,7 +1323,7 @@ void MKLDNNBinaryConvolutionNode::execute(mkldnn::stream strm) {
         dstStride[dstBlockDesc.getOrder()[i]] = dstBlockDesc.getStrides()[i];
     }
 
-    auto implType = getSelectedPrimitiveDescriptor()->getImplementationType();
+    auto implType = selectedPrimitiveDescriptor->getImplementationType();
     if (implType != impl_desc_type::ref) {
         executeOptimized(src, weights, dst, srcStride, weightsStride, dstStride);
     } else {

@@ -854,7 +854,10 @@ void MKLDNNDeformableConvolutionNode::initSupportedPrimitiveDescriptors() {
 }
 
 void MKLDNNDeformableConvolutionNode::createPrimitive() {
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU deformable convolution with name '" << getName() << "' doesn't have primitive descriptors.";
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     auto srcDims = config.inConfs[0].desc.getDims();
     auto weiDims = config.inConfs[2].desc.getDims();

@@ -1057,7 +1060,10 @@ void MKLDNNDeformableConvolutionNode::execute(mkldnn::stream strm) {
     const auto *weights = reinterpret_cast<const float *>(srcMemory2.GetPtr());
     float *dst = reinterpret_cast<float *>(dstMemory.GetPtr());
 
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU deformable convolution with name '" << getName() << "' doesn't have primitive descriptors.";
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     auto src_block_desc = config.inConfs[0].desc.getBlockingDesc();
     std::vector<size_t> src_strides(src_block_desc.getStrides().size());

@@ -942,7 +942,7 @@ void MKLDNNMVNNode::mvn_pln(const uint8_t* src_data, uint8_t* dst_data, const Si
             arg.src_stride = src_stride_size;
             arg.dst_stride = dst_stride_size;
             arg.work_amount = static_cast<size_t>(C2 / blk_size); // work amount for vector part
-            arg.oc_off = static_cast<size_t>(c * sizeof(float));
+            arg.oc_off = sizeof(float) * c;
             (*mvn_kernel)(&arg);
         });
     } else {

@@ -956,7 +956,7 @@ void MKLDNNMVNNode::mvn_pln(const uint8_t* src_data, uint8_t* dst_data, const Si
             arg.src_stride = src_stride_size;
             arg.dst_stride = dst_stride_size;
             arg.work_amount = static_cast<size_t>(C2 / blk_size);
-            arg.oc_off = static_cast<size_t>(c * sizeof(float));
+            arg.oc_off = sizeof(float) * c;
             (*mvn_kernel)(&arg);
         });
     }

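Both MVN hunks rewrite the channel offset the same way: since `sizeof(float)` already has type `size_t`, writing it first keeps the whole product in `size_t` and makes the explicit cast unnecessary (presumably the redundant cast is what the analyzer flagged). A tiny illustration of the equivalence, with `c` as a stand-in channel index:

    #include <cstddef>
    #include <type_traits>

    int main() {
        int c = 7;  // channel index, as in the MVN kernels
        auto before = static_cast<std::size_t>(c * sizeof(float));  // c is promoted to size_t for the multiply anyway
        auto after  = sizeof(float) * c;                            // same value, no cast needed
        static_assert(std::is_same<decltype(after), std::size_t>::value, "product is already size_t");
        return before == after ? 0 : 1;
    }
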
@@ -252,7 +252,10 @@ void MKLDNNPadNode::padConstant() {
         return;
     }
 
-    InferenceEngine::Precision precision = this->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc.getPrecision();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU Pad node with name '" << getName() << "' doesn't have primitive descriptors.";
+    InferenceEngine::Precision precision = selectedPrimitiveDescriptor->getConfig().inConfs[0].desc.getPrecision();
     OV_SWITCH(MKLDNNPlugin, PadConstantEmitter, this, precision,
               OV_CASE(InferenceEngine::Precision::FP32, float),
               OV_CASE(InferenceEngine::Precision::I32, int32_t),

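For context, the `OV_SWITCH`/`OV_CASE` lines following the new guard dispatch the constant fill on the input precision. Setting the plugin's macros aside, the underlying idea is an ordinary runtime-value to compile-time-type dispatch, roughly like this hypothetical sketch (enum and function names are illustrative):

    #include <cstdint>
    #include <iostream>

    enum class Precision { FP32, I32, U8 };

    template <typename T>
    void padConstantImpl() { std::cout << "filling with elements of size " << sizeof(T) << "\n"; }

    void padConstant(Precision precision) {
        switch (precision) {                                   // runtime precision -> compile-time element type
            case Precision::FP32: padConstantImpl<float>();   break;
            case Precision::I32:  padConstantImpl<int32_t>(); break;
            case Precision::U8:   padConstantImpl<uint8_t>(); break;
        }
    }

    int main() { padConstant(Precision::FP32); }
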
@@ -49,11 +49,11 @@ private:
         InferenceEngine::SizeVector srcStrides;
         InferenceEngine::SizeVector dstStrides;
         InferenceEngine::SizeVector srcDimsForReflectOrSymmetric;
-        size_t nDimsForWork;
-        size_t workAmount;
-        size_t lastDstDim;
-        size_t shift;
-        uint8_t sizeData;
+        size_t nDimsForWork = 0lu;
+        size_t workAmount = 0lu;
+        size_t lastDstDim = 1lu;
+        size_t shift = 0lu;
+        uint8_t sizeData = 1;
     } params;
 
     template<typename T>

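This header hunk, and the similar ones below for the quantize kernel flags, the NMS thresholds, and the logistic config params, all fix the same finding: trivially-typed members that are only assigned later were left uninitialized, so any early read was undefined. The cure is a C++11 default member initializer on each field. A minimal sketch mirroring the Pad node's `params` fields (standalone struct, not the plugin's header):

    #include <cstddef>
    #include <cstdint>

    struct PadParams {
        std::size_t nDimsForWork = 0lu;   // 0 work dimensions until configured
        std::size_t workAmount   = 0lu;
        std::size_t lastDstDim   = 1lu;   // neutral multiplier
        std::size_t shift        = 0lu;
        uint8_t     sizeData     = 1;     // element size in bytes, smallest sane default
    };

    int main() {
        PadParams p;                      // every field has a well-defined value here
        return static_cast<int>(p.workAmount);
    }
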
@@ -279,8 +279,8 @@ private:
     Reg64 reg_output_scale = rbx;
     Reg64 reg_output_shift = rdx;
 
-    bool do_rounding;
-    bool do_dequantization;
+    bool do_rounding = true;
+    bool do_dequantization = true;
 
     inline void compute_planar() {
         int src_type_size = jqp_.src_prc.size();

@@ -1209,7 +1209,11 @@ void MKLDNNQuantizeNode::createPrimitive() {
 
     jqp.op_type = quantizeOpType;
 
-    if (getSelectedPrimitiveDescriptor()->getImplementationType() != impl_desc_type::ref) {
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU quantize node with name '" << getName() << "' doesn't have primitive descriptors.";
+
+    if (selectedPrimitiveDescriptor->getImplementationType() != impl_desc_type::ref) {
         if (mayiuse(cpu::x64::avx512_common)) {
             if (isBinarization())
                 quantize_kernel.reset(new jit_uni_binarization_kernel<cpu::x64::avx512_common>(jqp));

@@ -1523,7 +1527,11 @@ void MKLDNNQuantizeNode::executeQuantization() {
 }
 
 void MKLDNNQuantizeNode::execute(mkldnn::stream strm) {
-    if (getSelectedPrimitiveDescriptor()->getImplementationType() != impl_desc_type::ref) {
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU quantize node with name '" << getName() << "' doesn't have primitive descriptors.";
+
+    if (selectedPrimitiveDescriptor->getImplementationType() != impl_desc_type::ref) {
         if (jqp.op_type == QuantizeOpType::Binarization)
             executeBinarization();
         else

@@ -332,7 +332,10 @@ void MKLDNNROIPoolingNode::initSupportedPrimitiveDescriptors() {
 }
 
 void MKLDNNROIPoolingNode::createPrimitive() {
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU ROI Pooling node with name '" << getName() << "' doesn't have primitive descriptors.";
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     const int simd_w = mayiuse(cpu::x64::avx512_common) ? 16 : 8;
     jpp.c_block = simd_w;

@@ -378,7 +381,10 @@ void MKLDNNROIPoolingNode::execute(mkldnn::stream strm) {
     const auto *src_roi = reinterpret_cast<const float *>(srcMemory1.GetPtr());
     float *dst = reinterpret_cast<float *>(dstMemory.GetPtr());
 
-    auto config = getSelectedPrimitiveDescriptor()->getConfig();
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU ROI Pooling node with name '" << getName() << "' doesn't have primitive descriptors.";
+    auto config = selectedPrimitiveDescriptor->getConfig();
 
     auto src_strides = config.inConfs[0].desc.getBlockingDesc().getStrides();
     auto dst_strides = config.outConfs[0].desc.getBlockingDesc().getStrides();

@@ -526,8 +532,8 @@ void MKLDNNROIPoolingNode::execute(mkldnn::stream strm) {
             arg.xf = in_x - left_x_index;
             arg.yf = in_y - top_y_index;
 
-            arg.xoff = (size_t) ((right_x_index - left_x_index) * jpp.c_block * sizeof(float));
-            arg.yoff = (size_t) ((bottom_y_index - top_y_index) * jpp.iw * jpp.c_block * sizeof(float));
+            arg.xoff = sizeof(float) * (right_x_index - left_x_index) * jpp.c_block;
+            arg.yoff = sizeof(float) * (bottom_y_index - top_y_index) * jpp.iw * jpp.c_block;
 
             arg.src = &src_data[roi_batch_ind * src_strides[0] + cb * src_strides[1] +
                                 top_y_index * src_strides[2] + left_x_index * src_strides[3]];

@@ -458,7 +458,10 @@ void MKLDNNSplitNode::setDynamicBatchLim(int lim) {
 }
 
 void MKLDNNSplitNode::prepareOptimizedParams() {
-    const auto& inpTensorDesc = this->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc;
+    auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
+    if (!selectedPrimitiveDescriptor)
+        THROW_IE_EXCEPTION << "CPU Split node with name '" << getName() << "' doesn't have primitive descriptors.";
+    const auto& inpTensorDesc = selectedPrimitiveDescriptor->getConfig().inConfs[0].desc;
     const auto outputPortsCount = outDims.size();
 
     //find axis order position

@@ -433,11 +433,11 @@ private:
     size_t num_boxes;
     size_t num_classes;
 
-    size_t max_output_boxes_per_class;
-    float iou_threshold;
-    float score_threshold;
-    float soft_nms_sigma;
-    float scale;
+    size_t max_output_boxes_per_class = 0lu;
+    float iou_threshold = 0.0f;
+    float score_threshold = 0.0f;
+    float soft_nms_sigma = 0.0f;
+    float scale = 1.f;
 
     std::vector<std::vector<size_t>> numFiltBox;
     const std::string inType = "input", outType = "output";

@@ -40,8 +40,8 @@ struct jit_args_logistic {
 struct jit_logistic_config_params {
     InferenceEngine::Precision src_dt;
     InferenceEngine::Precision dst_dt;
-    unsigned src_data_size;
-    unsigned dst_data_size;
+    unsigned src_data_size = 0;
+    unsigned dst_data_size = 0;
 };
 
 struct jit_uni_logistic_kernel {