From 74f6cf372613df9d712fe6c8556b5209b8989242 Mon Sep 17 00:00:00 2001
From: Andrey Sapozhnikov
Date: Fri, 10 Dec 2021 20:36:46 +0300
Subject: [PATCH] [GNA] Coverity issues fix (#9119)

---
 src/plugins/intel_gna/backend/am_intel_dnn.cpp       |  2 +-
 src/plugins/intel_gna/backend/am_intel_dnn.hpp       |  4 ++--
 src/plugins/intel_gna/backend/dnn_components.hpp     |  2 +-
 .../intel_gna/descriptions/gna_output_desc.hpp       |  2 +-
 src/plugins/intel_gna/frontend/scale_factor_calc.hpp |  4 ++++
 src/plugins/intel_gna/gna_device.cpp                 |  8 +++++---
 src/plugins/intel_gna/gna_graph_tools.hpp            |  2 +-
 src/plugins/intel_gna/gna_model_serial.cpp           |  1 -
 src/plugins/intel_gna/gna_plugin_internal.hpp        | 10 +++++++++-
 src/plugins/intel_gna/layers/gna_layer_info.hpp      |  6 ++++--
 src/plugins/intel_gna/optimizer/gna_pass_manager.cpp |  3 +++
 src/plugins/intel_gna/runtime/pwl.cpp                | 12 ++----------
 src/plugins/intel_gna/runtime/pwl.h                  |  4 ++--
 13 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/src/plugins/intel_gna/backend/am_intel_dnn.cpp b/src/plugins/intel_gna/backend/am_intel_dnn.cpp
index 826cbfc3151..c77aa370280 100644
--- a/src/plugins/intel_gna/backend/am_intel_dnn.cpp
+++ b/src/plugins/intel_gna/backend/am_intel_dnn.cpp
@@ -367,7 +367,7 @@ void GNAPluginNS::backend::AMIntelDNN::InitCopyComponentPrivate(intel_dnn_compon
 }
 
 void GNAPluginNS::backend::AMIntelDNN::InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &comp,
-                                                                           DnnActivation function_id,
+                                                                           const DnnActivation& function_id,
                                                                            intel_dnn_orientation_t orientation,
                                                                            uint32_t num_rows,
                                                                            uint32_t num_columns,
diff --git a/src/plugins/intel_gna/backend/am_intel_dnn.hpp b/src/plugins/intel_gna/backend/am_intel_dnn.hpp
index bd7423ba0b7..c87c8bf97b1 100644
--- a/src/plugins/intel_gna/backend/am_intel_dnn.hpp
+++ b/src/plugins/intel_gna/backend/am_intel_dnn.hpp
@@ -190,7 +190,7 @@ public:
 
     template
     static void InitPiecewiseLinearComponent(intel_dnn_component_t &cmp,
-                                             DnnActivation function_id,
+                                             const DnnActivation& function_id,
                                              intel_dnn_orientation_t orientation,
                                              uint32_t num_rows,
                                              uint32_t num_columns,
@@ -392,7 +392,7 @@ private:
                                  bool postInitMem);
 
     static void InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &cmp,
-                                                    DnnActivation function_id,
+                                                    const DnnActivation& function_id,
                                                     intel_dnn_orientation_t orientation,
                                                     uint32_t num_rows,
                                                     uint32_t num_columns,
diff --git a/src/plugins/intel_gna/backend/dnn_components.hpp b/src/plugins/intel_gna/backend/dnn_components.hpp
index 5cda9f0cf98..6797d561073 100644
--- a/src/plugins/intel_gna/backend/dnn_components.hpp
+++ b/src/plugins/intel_gna/backend/dnn_components.hpp
@@ -17,7 +17,7 @@ struct DnnComponentExtra {
     intel_dnn_component_t dnnComponent;
     bool isDelayed;
     DnnComponentExtra(std::string name,
-                      intel_dnn_component_t dnnComponent,
+                      const intel_dnn_component_t& dnnComponent,
                       bool isDelayed) :
                       name(name), dnnComponent(dnnComponent), isDelayed(isDelayed) {}
 };
diff --git a/src/plugins/intel_gna/descriptions/gna_output_desc.hpp b/src/plugins/intel_gna/descriptions/gna_output_desc.hpp
index 16237d19ff5..f9902004eaa 100644
--- a/src/plugins/intel_gna/descriptions/gna_output_desc.hpp
+++ b/src/plugins/intel_gna/descriptions/gna_output_desc.hpp
@@ -10,7 +10,7 @@ namespace GNAPluginNS {
 struct OutputDesc {
-    uint8_t precision;
+    uint8_t precision = InferenceEngine::Precision::UNSPECIFIED;
     double scale_factor = 1.0;
     uint32_t num_bytes_per_element = 0;
     uint32_t num_elements = 0;
diff --git a/src/plugins/intel_gna/frontend/scale_factor_calc.hpp b/src/plugins/intel_gna/frontend/scale_factor_calc.hpp
index 3ee90cd676c..5cc913a2550 100644
--- a/src/plugins/intel_gna/frontend/scale_factor_calc.hpp
+++ b/src/plugins/intel_gna/frontend/scale_factor_calc.hpp
@@ -1018,6 +1018,10 @@ class ScaleFactorPerLayer {
             THROW_GNA_EXCEPTION << "layers entered into concat have different scale factors. Layer name: " << concatLayer->name;
         }
 
+        if (sourceQuantParams == nullptr) {
+            THROW_GNA_EXCEPTION << "Source quantization parameters have not been initialized";
+        }
+
         quantData->_dst_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
         quantData->_src_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
 
diff --git a/src/plugins/intel_gna/gna_device.cpp b/src/plugins/intel_gna/gna_device.cpp
index f7c400e01d9..023799790da 100644
--- a/src/plugins/intel_gna/gna_device.cpp
+++ b/src/plugins/intel_gna/gna_device.cpp
@@ -556,10 +556,12 @@ void GNADeviceHelper::close() {
             gnawarn() << "Request with Id " << requestId << " was not awaited successfully";
         }
     }
-    {
-        std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
-        const auto status = Gna2DeviceClose(nGnaDeviceIndex);
+    std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
+    const auto status = Gna2DeviceClose(nGnaDeviceIndex);
+    try {
         checkGna2Status(status, "Gna2DeviceClose");
+    } catch (...) {
+        gnawarn() << "GNA Device was not successfully closed with status " << status << std::endl;
     }
 #endif
     deviceOpened = false;
diff --git a/src/plugins/intel_gna/gna_graph_tools.hpp b/src/plugins/intel_gna/gna_graph_tools.hpp
index 51701268209..07db58deedc 100644
--- a/src/plugins/intel_gna/gna_graph_tools.hpp
+++ b/src/plugins/intel_gna/gna_graph_tools.hpp
@@ -486,7 +486,7 @@ inline DataPtr CNNReplaceDataWithChangedTensorDescription(DataPtr old_data, Tens
 /**
  * @brief Creates a Reshape with given name and tensor description
  */
-inline CNNLayerPtr CNNNetworkCreateReshape(TensorDesc td, std::string name, bool quantized) {
+inline CNNLayerPtr CNNNetworkCreateReshape(const TensorDesc& td, const std::string& name, bool quantized) {
     auto reshape = std::make_shared<ReshapeLayer>(LayerParams({name, "reshape", Precision::FP32}));
     auto reshapeLayerWithQuant = quantized ?
                                 InferenceEngine::injectData<QuantizedLayerParams>(reshape) : reshape;
     auto dataPtr = std::make_shared<Data>(name + "_data", td);
diff --git a/src/plugins/intel_gna/gna_model_serial.cpp b/src/plugins/intel_gna/gna_model_serial.cpp
index 62b37fdd991..bc31eba3359 100644
--- a/src/plugins/intel_gna/gna_model_serial.cpp
+++ b/src/plugins/intel_gna/gna_model_serial.cpp
@@ -925,7 +925,6 @@ void GNAModelSerial::ImportOutputs(std::istream &is,
         HeaderLatest::RuntimeEndPoint output = ReadEndPoint(is);
         OutputDesc description;
         description.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t *> (basePtr) + output.descriptor_offset));
-        description.orientation = kDnnInterleavedOrientation;
         description.orientation = output.orientation;
         description.num_bytes_per_element = output.element_size;
         description.scale_factor = output.scaleFactor;
diff --git a/src/plugins/intel_gna/gna_plugin_internal.hpp b/src/plugins/intel_gna/gna_plugin_internal.hpp
index ba7eaf6e5fa..ad4b5fc753e 100644
--- a/src/plugins/intel_gna/gna_plugin_internal.hpp
+++ b/src/plugins/intel_gna/gna_plugin_internal.hpp
@@ -29,6 +29,9 @@ private:
         }
     }
 
+protected:
+    std::string _pluginInternalName = "GNA";
+
 public:
     InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl(
         const InferenceEngine::CNNNetwork &network,
@@ -67,7 +70,12 @@ public:
     }
 
     std::string GetName() const noexcept override {
-        return GetCurrentPlugin()->GetName();
+        auto ptr = plgPtr.lock();
+        if (ptr == nullptr) {
+            return _pluginInternalName;
+        } else {
+            return ptr->GetName();
+        }
     }
 
     InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
diff --git a/src/plugins/intel_gna/layers/gna_layer_info.hpp b/src/plugins/intel_gna/layers/gna_layer_info.hpp
index f07f07cd371..ed6962b0a59 100644
--- a/src/plugins/intel_gna/layers/gna_layer_info.hpp
+++ b/src/plugins/intel_gna/layers/gna_layer_info.hpp
@@ -272,7 +272,7 @@ class LayerInfo {
         return isOfType("permute");
     }
     // @brief this not only mathematically trivial, has some WA for kaldi case
-    bool isTrivialPermute() const {
+    bool isTrivialPermute() const noexcept {
         if (!isPermute()) return false;
 
         auto layerOrder = layer->GetParamAsInts("order");
@@ -280,7 +280,9 @@ class LayerInfo {
         if (layerOrder == std::vector<int>({ 0, 3, 2, 1 })) {
             return true; // supported case
         }
-        IE_ASSERT(!layer->insData.empty());
+        if (layer->insData.empty()) {
+            return false; // unsupported case
+        }
         auto inputs = layer->insData.begin()->lock();
         auto inputsOrder = inputs->getTensorDesc().getDims();
 
diff --git a/src/plugins/intel_gna/optimizer/gna_pass_manager.cpp b/src/plugins/intel_gna/optimizer/gna_pass_manager.cpp
index 0fd3bcbf065..e804289f5c9 100644
--- a/src/plugins/intel_gna/optimizer/gna_pass_manager.cpp
+++ b/src/plugins/intel_gna/optimizer/gna_pass_manager.cpp
@@ -1741,6 +1741,9 @@ void RemoveSingleInputConcatPass::run() {
     for (auto &l : *pLayers) {
         if (l->type == "Concat") {
             auto concat = dynamic_cast<InferenceEngine::ConcatLayer*>(l.get());
+            if (concat == nullptr) {
+                THROW_GNA_EXCEPTION << "Layer has type Concat but failed during casting to ConcatLayer";
+            }
             if (concat->insData.size() == 1 && concat->outData.size() > 0) {
                 auto in = concat->insData[0];
                 auto in_layer = getCreatorLayer(in.lock());
diff --git a/src/plugins/intel_gna/runtime/pwl.cpp b/src/plugins/intel_gna/runtime/pwl.cpp
index ae7d3edaec3..6d30647d11f 100644
--- a/src/plugins/intel_gna/runtime/pwl.cpp
+++ b/src/plugins/intel_gna/runtime/pwl.cpp
@@ -468,7 +468,7 @@ std::vector<pwl_t> pwl_search(const DnnActivation& activation_type,
 }
 
 
-void PwlDesignOpt(const DnnActivation activation_type,
+void PwlDesignOpt(const DnnActivation& activation_type,
                   std::vector<gna_pwl_segment_t> &ptr_segment,
                   const float scale_in,
                   const float scale_out,
@@ -582,7 +582,7 @@ void PwlDesignOpt(const DnnActivation activation_type,
     }
 }
 
-void PwlDesign(const DnnActivation activation_type,
+void PwlDesign(const DnnActivation& activation_type,
                gna_pwl_segment_t *ptr_segment,
                const uint32_t num_segments,
                const float scale_in,
@@ -976,7 +976,6 @@ void PwlApply32(intel_dnn_component_t *component,
             }
             break;
         case kActFakeQuantize: {
-            bool clamping = true;
             double levels = transform->func_id.fqParams.levels;
 
             for (uint32_t i = num_row_start; i <= num_row_end; i++) {
@@ -988,16 +987,9 @@ void PwlApply32(intel_dnn_component_t *component,
                 double output_low = transform->func_id.fqParams.output_low[outputChannel];
                 double output_high = transform->func_id.fqParams.output_high[outputChannel];
 
-                auto scaleInput = (levels - 1) / (input_high - input_low);
-                auto scaleOutput = (levels - 1) / (output_high - output_low);
-
                 for (uint32_t j = num_col_start; j <= num_col_end; j++) {
                     auto offset = i * num_columns + j;
                     auto x = ptr_in[offset];
-                    if (!clamping) {
-                        ptr_out[offset] = ptr_in[offset] * scaleInput / scaleOutput;
-                        continue;
-                    }
 
                     if (x <= std::min(input_low, input_high)) {
                         ptr_out[offset] = output_low;
diff --git a/src/plugins/intel_gna/runtime/pwl.h b/src/plugins/intel_gna/runtime/pwl.h
index 53798e87e2a..a2071b37bf7 100644
--- a/src/plugins/intel_gna/runtime/pwl.h
+++ b/src/plugins/intel_gna/runtime/pwl.h
@@ -91,13 +91,13 @@ void PwlApply32(intel_dnn_component_t *component,
                 const uint32_t num_row_end,
                 const uint32_t num_col_start,
                 const uint32_t num_col_end);
-void PwlDesign(const DnnActivation activation_type,
+void PwlDesign(const DnnActivation& activation_type,
                gna_pwl_segment_t *ptr_segment,
                const uint32_t num_segments,
                const float scale_in,
                const float scale_out,
                const bool low_precision);
-void PwlDesignOpt(const DnnActivation activation_type,
+void PwlDesignOpt(const DnnActivation& activation_type,
                   std::vector<gna_pwl_segment_t> &ptr_segment,
                   const float scale_in,
                   const float scale_out,
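
The two defensive patterns this patch applies repeatedly — null-checking a dynamic_cast result before dereferencing it, and keeping exceptions from escaping a shutdown path — can be illustrated in isolation. Below is a minimal, self-contained C++ sketch; Layer, ConcatLayer, check_status, and Device are hypothetical stand-ins invented for this example, not OpenVINO or GNA library APIs.

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for a polymorphic layer hierarchy.
struct Layer {
    virtual ~Layer() = default;
    std::string type;
};

struct ConcatLayer : Layer {
    std::vector<int> inputs;
};

// Pattern 1: a dynamic_cast can return nullptr even when the type tag
// looks right; check before dereferencing (the Coverity complaint).
void process(Layer* l) {
    if (l->type == "Concat") {
        auto* concat = dynamic_cast<ConcatLayer*>(l);
        if (concat == nullptr) {
            throw std::runtime_error("layer has type Concat but failed casting to ConcatLayer");
        }
        std::cout << "concat with " << concat->inputs.size() << " inputs\n";
    }
}

// Hypothetical status checker that throws on failure, similar in
// spirit to checkGna2Status in the patch.
void check_status(int status, const char* what) {
    if (status != 0) {
        throw std::runtime_error(std::string(what) + " failed");
    }
}

// Pattern 2: a close/shutdown path should not let exceptions escape;
// catch, log, and continue tearing down.
struct Device {
    void close() noexcept {
        const int status = -1;  // pretend the underlying close call failed
        try {
            check_status(status, "DeviceClose");
        } catch (...) {
            std::cerr << "device was not successfully closed, status " << status << "\n";
        }
    }
};

int main() {
    ConcatLayer c;
    c.type = "Concat";
    process(&c);  // prints "concat with 0 inputs"

    Device d;
    d.close();    // logs the failure instead of terminating
    return 0;
}

Declaring close() noexcept and logging instead of rethrowing mirrors the GNADeviceHelper::close() change above: an exception escaping a shutdown path (often reached from a destructor) risks std::terminate, so logging and continuing teardown is the safer choice.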