[GNA] Coverity issues fix (#9119)

Andrey Sapozhnikov 2021-12-10 20:36:46 +03:00 committed by GitHub
parent 562d388ad9
commit 74f6cf3726
13 changed files with 35 additions and 25 deletions

View File

@@ -367,7 +367,7 @@ void GNAPluginNS::backend::AMIntelDNN::InitCopyComponentPrivate(intel_dnn_compon
}
void GNAPluginNS::backend::AMIntelDNN::InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &comp,
DnnActivation function_id,
const DnnActivation& function_id,
intel_dnn_orientation_t orientation,
uint32_t num_rows,
uint32_t num_columns,
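
This hunk, together with the matching declarations below, changes DnnActivation parameters from pass-by-value to pass-by-const-reference, the usual remedy for Coverity's complaints about copying a non-trivial struct on every call. A minimal sketch of the pattern, with an invented Activation type standing in for DnnActivation:

// Illustrative sketch only, not part of the commit.
#include <iostream>
#include <string>
#include <vector>

struct Activation {                      // stand-in for a non-trivial struct like DnnActivation
    std::string name;
    std::vector<float> params;
};

// Before: every call copies the struct (string buffer and vector storage).
void InitByValue(Activation act) {
    std::cout << act.name << " has " << act.params.size() << " params\n";
}

// After: const reference, no copy; the callee still cannot modify the argument.
void InitByConstRef(const Activation& act) {
    std::cout << act.name << " has " << act.params.size() << " params\n";
}

int main() {
    Activation relu{"relu", {0.0f}};
    InitByValue(relu);      // copies name and params
    InitByConstRef(relu);   // passes only a reference
    return 0;
}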

View File

@@ -190,7 +190,7 @@ public:
template<class A, class B>
static void InitPiecewiseLinearComponent(intel_dnn_component_t &cmp,
DnnActivation function_id,
const DnnActivation& function_id,
intel_dnn_orientation_t orientation,
uint32_t num_rows,
uint32_t num_columns,
@@ -392,7 +392,7 @@ private:
bool postInitMem);
static void InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &cmp,
DnnActivation function_id,
const DnnActivation& function_id,
intel_dnn_orientation_t orientation,
uint32_t num_rows,
uint32_t num_columns,

View File

@@ -17,7 +17,7 @@ struct DnnComponentExtra {
intel_dnn_component_t dnnComponent;
bool isDelayed;
DnnComponentExtra(std::string name,
intel_dnn_component_t dnnComponent,
const intel_dnn_component_t& dnnComponent,
bool isDelayed) :
name(name), dnnComponent(dnnComponent), isDelayed(isDelayed) {}
};
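
DnnComponentExtra's constructor now receives the component by const reference and copies it once into the member. A self-contained sketch of that constructor shape, plus a by-value-and-move variant that is sometimes preferred when callers pass temporaries (Component is a stand-in type, not the real intel_dnn_component_t):

// Illustrative sketch only, not part of the commit.
#include <string>
#include <utility>
#include <vector>

struct Component {                       // stand-in for intel_dnn_component_t
    std::vector<float> buffer;
};

struct ComponentExtra {
    std::string name;
    Component component;
    bool isDelayed;

    // Mirrors the pattern in the commit: const reference in, a single copy into the member.
    ComponentExtra(std::string name, const Component& component, bool isDelayed)
        : name(name), component(component), isDelayed(isDelayed) {}
};

// Alternative sometimes used when callers can hand over temporaries:
struct ComponentExtraMove {
    std::string name;
    Component component;
    bool isDelayed;

    ComponentExtraMove(std::string name, Component component, bool isDelayed)
        : name(std::move(name)), component(std::move(component)), isDelayed(isDelayed) {}
};

int main() {
    Component c{{1.0f, 2.0f}};
    ComponentExtra a{"copy", c, false};                  // one copy of c
    ComponentExtraMove b{"move", std::move(c), false};   // c's buffer is moved, not copied
    return 0;
}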

View File

@@ -10,7 +10,7 @@
namespace GNAPluginNS {
struct OutputDesc {
uint8_t precision;
uint8_t precision = InferenceEngine::Precision::UNSPECIFIED;
double scale_factor = 1.0;
uint32_t num_bytes_per_element = 0;
uint32_t num_elements = 0;
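
Giving precision a default value means a default-constructed OutputDesc no longer carries an indeterminate field, which is how uninitialized-member findings are typically resolved. A generic sketch of in-class default member initializers, with illustrative field names:

// Illustrative sketch only, not part of the commit.
#include <cstdint>
#include <iostream>

struct OutputInfo {
    uint8_t precision = 0;                  // every field now has a deterministic default
    double scale_factor = 1.0;
    uint32_t num_bytes_per_element = 0;
    uint32_t num_elements = 0;
};

int main() {
    OutputInfo info;                        // default-constructed: no indeterminate reads
    std::cout << static_cast<int>(info.precision) << " " << info.scale_factor << "\n";
    return 0;
}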

View File

@@ -1018,6 +1018,10 @@ class ScaleFactorPerLayer<InferenceEngine::ConcatLayer*, QUANT_DESC> {
THROW_GNA_EXCEPTION << "layers entered into concat have different scale factors. Layer name: " << concatLayer->name;
}
if (sourceQuantParams == nullptr) {
THROW_GNA_EXCEPTION << "Source quantization parameters have not been initialized";
}
quantData->_dst_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
quantData->_src_quant.SetScale(sourceQuantParams->_dst_quant.GetScale());
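
The concat scale-factor propagation now refuses to dereference sourceQuantParams when it was never assigned. A stripped-down sketch of the guard; QuantParams and std::runtime_error stand in for the plugin's quantization data and THROW_GNA_EXCEPTION:

// Illustrative sketch only, not part of the commit.
#include <iostream>
#include <stdexcept>

struct QuantParams {
    double scale = 1.0;
};

double PropagateScale(const QuantParams* source) {
    // Guard the pointer before dereferencing it, as the commit does for sourceQuantParams.
    if (source == nullptr) {
        throw std::runtime_error("Source quantization parameters have not been initialized");
    }
    return source->scale;
}

int main() {
    QuantParams q{0.5};
    std::cout << PropagateScale(&q) << "\n";   // prints 0.5
    try {
        PropagateScale(nullptr);               // triggers the guard
    } catch (const std::exception& e) {
        std::cout << "caught: " << e.what() << "\n";
    }
    return 0;
}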

View File

@@ -556,10 +556,12 @@ void GNADeviceHelper::close() {
gnawarn() << "Request with Id " << requestId << " was not awaited successfully";
}
}
{
std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
const auto status = Gna2DeviceClose(nGnaDeviceIndex);
std::unique_lock<std::mutex> lockGnaCalls{ acrossPluginsSync };
const auto status = Gna2DeviceClose(nGnaDeviceIndex);
try {
checkGna2Status(status, "Gna2DeviceClose");
} catch (...) {
gnawarn() << "GNA Device was not successfully closed with status " << status << std::endl;
}
#endif
deviceOpened = false;
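
close() now scopes the lock and turns a failing Gna2DeviceClose into a warning instead of letting checkGna2Status throw out of the shutdown path. A generic sketch of that log-and-continue pattern; closeHandle, checkStatus and the mutex are invented stand-ins for the GNA library calls:

// Illustrative sketch only, not part of the commit.
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>

std::mutex acrossCallsSync;            // stand-in for the plugin-wide GNA call mutex

int closeHandle(int handle) {          // stand-in for Gna2DeviceClose, returns a status code
    return handle >= 0 ? 0 : -1;
}

void checkStatus(int status, const char* what) {   // stand-in for checkGna2Status
    if (status != 0) throw std::runtime_error(std::string(what) + " failed");
}

void close(int handle) {
    {
        // Scope the lock so it is released before the rest of the teardown.
        std::unique_lock<std::mutex> lock{acrossCallsSync};
        const auto status = closeHandle(handle);
        try {
            checkStatus(status, "closeHandle");
        } catch (...) {
            // A shutdown path should not propagate exceptions; warn and keep going.
            std::cerr << "Device was not successfully closed, status " << status << std::endl;
        }
    }
    // remaining teardown (e.g. clearing the "opened" flag) continues regardless
}

int main() {
    close(1);    // succeeds silently
    close(-1);   // logs a warning instead of throwing
    return 0;
}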

View File

@@ -486,7 +486,7 @@ inline DataPtr CNNReplaceDataWithChangedTensorDescription(DataPtr old_data, Tens
/**
* @brief Creates a Reshape with given name and tensor description
*/
inline CNNLayerPtr CNNNetworkCreateReshape(TensorDesc td, std::string name, bool quantized) {
inline CNNLayerPtr CNNNetworkCreateReshape(const TensorDesc& td, const std::string& name, bool quantized) {
auto reshape = std::make_shared<ReshapeLayer>(LayerParams({name, "reshape", Precision::FP32}));
auto reshapeLayerWithQuant = quantized ? InferenceEngine::injectData<GNAPluginNS::QuantizedLayerParams>(reshape) : reshape;
auto dataPtr = std::make_shared<Data>(name + "_data", td);
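
CNNNetworkCreateReshape gets the same const-reference treatment for its TensorDesc and name parameters; for std::string in particular, passing by value can mean a heap allocation on every call. A tiny illustrative comparison:

// Illustrative sketch only, not part of the commit.
#include <cstddef>
#include <iostream>
#include <string>

// By value: constructing the parameter may allocate and copy the character buffer.
std::size_t lengthByValue(std::string name) { return name.size(); }

// By const reference: no copy, the caller's buffer is read in place.
std::size_t lengthByConstRef(const std::string& name) { return name.size(); }

int main() {
    std::string name = "reshape_before_fully_connected_layer";   // long enough to defeat SSO
    std::cout << lengthByValue(name) << " " << lengthByConstRef(name) << "\n";
    return 0;
}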

View File

@@ -925,7 +925,6 @@ void GNAModelSerial::ImportOutputs(std::istream &is,
HeaderLatest::RuntimeEndPoint output = ReadEndPoint(is);
OutputDesc description;
description.ptrs.push_back(reinterpret_cast<float*>(reinterpret_cast<uint8_t *> (basePtr) + output.descriptor_offset));
description.orientation = kDnnInterleavedOrientation;
description.orientation = output.orientation;
description.num_bytes_per_element = output.element_size;
description.scale_factor = output.scaleFactor;
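
ImportOutputs used to assign orientation twice in a row, so the first assignment was a dead store; only the value that is actually used survives. A minimal before/after sketch of that kind of finding, with simplified types:

// Illustrative sketch only, not part of the commit.
#include <iostream>

enum class Orientation { Interleaved, NonInterleaved };

struct EndPoint { Orientation orientation = Orientation::NonInterleaved; };
struct Description { Orientation orientation = Orientation::Interleaved; };

Description importBefore(const EndPoint& ep) {
    Description d;
    d.orientation = Orientation::Interleaved;   // dead store: overwritten on the next line
    d.orientation = ep.orientation;
    return d;
}

Description importAfter(const EndPoint& ep) {
    Description d;
    d.orientation = ep.orientation;             // only the meaningful assignment remains
    return d;
}

int main() {
    EndPoint ep;
    std::cout << (importBefore(ep).orientation == importAfter(ep).orientation) << "\n";  // prints 1
    return 0;
}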

View File

@@ -29,6 +29,9 @@ private:
}
}
protected:
std::string _pluginInternalName = "GNA";
public:
InferenceEngine::IExecutableNetworkInternal::Ptr LoadExeNetworkImpl(
const InferenceEngine::CNNNetwork &network,
@@ -67,7 +70,12 @@ public:
}
std::string GetName() const noexcept override {
return GetCurrentPlugin()->GetName();
auto ptr = plgPtr.lock();
if (ptr == nullptr) {
return _pluginInternalName;
} else {
return ptr->GetName();
}
}
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,

View File

@@ -272,7 +272,7 @@ class LayerInfo {
return isOfType("permute");
}
// @brief this is not only mathematically trivial, it also has some WA for the kaldi case
bool isTrivialPermute() const {
bool isTrivialPermute() const noexcept {
if (!isPermute()) return false;
auto layerOrder = layer->GetParamAsInts("order");
@@ -280,7 +280,9 @@
if (layerOrder == std::vector<int>({ 0, 3, 2, 1 })) {
return true; // supported case
}
IE_ASSERT(!layer->insData.empty());
if (layer->insData.empty()) {
return false; // unsupported case
}
auto inputs = layer->insData.begin()->lock();
auto inputsOrder = inputs->getTensorDesc().getDims();
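
isTrivialPermute() replaces IE_ASSERT, which throws on failure, with an early return false for the empty-input case, keeping the query consistent with its new noexcept specifier. A generic sketch of answering a boolean query conservatively instead of asserting (the Layer type is invented):

// Illustrative sketch only, not part of the commit.
#include <iostream>
#include <vector>

struct Layer {
    std::vector<int> order;       // permutation order
    std::vector<int> inputDims;   // input shape; empty when no inputs are attached
};

// A boolean query in the spirit of isTrivialPermute(): malformed input yields "false",
// it is not reported through a throwing assert.
bool isTrivialPermute(const Layer& layer) noexcept {
    static const std::vector<int> supportedOrder{0, 3, 2, 1};
    if (layer.order == supportedOrder) {
        return true;                 // supported case
    }
    if (layer.inputDims.empty()) {
        return false;                // was an assert; now just an unsupported case
    }
    // further shape-dependent checks would go here
    return false;
}

int main() {
    Layer ok{{0, 3, 2, 1}, {1, 8, 8, 3}};
    Layer noInputs{{0, 2, 1}, {}};
    std::cout << isTrivialPermute(ok) << " " << isTrivialPermute(noInputs) << "\n";  // prints "1 0"
    return 0;
}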

View File

@@ -1741,6 +1741,9 @@ void RemoveSingleInputConcatPass::run() {
for (auto &l : *pLayers) {
if (l->type == "Concat") {
auto concat = dynamic_cast<ConcatLayer*>(l.get());
if (concat == nullptr) {
THROW_GNA_EXCEPTION << "Layer has type Concat but faild during casting to ConcatLayer";
}
if (concat->insData.size() == 1 && concat->outData.size() > 0) {
auto in = concat->insData[0];
auto in_layer = getCreatorLayer(in.lock());
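
RemoveSingleInputConcatPass now verifies the dynamic_cast result before using it rather than assuming every layer typed "Concat" really is a ConcatLayer. A small stand-alone sketch of the guard, with simplified layer types:

// Illustrative sketch only, not part of the commit.
#include <iostream>
#include <stdexcept>
#include <string>

struct Layer {
    std::string type;
    virtual ~Layer() = default;
};

struct ConcatLayer : Layer {
    int axis = 1;
};

void processConcat(Layer* l) {
    if (l->type != "Concat") return;
    auto concat = dynamic_cast<ConcatLayer*>(l);
    if (concat == nullptr) {
        // The type string and the concrete C++ type disagree; fail loudly rather than dereference null.
        throw std::runtime_error("Layer has type Concat but failed to cast to ConcatLayer");
    }
    std::cout << "concat axis: " << concat->axis << "\n";
}

int main() {
    ConcatLayer good;
    good.type = "Concat";
    processConcat(&good);          // prints the axis

    Layer mislabeled;
    mislabeled.type = "Concat";    // the type string lies about the concrete class
    try {
        processConcat(&mislabeled);
    } catch (const std::exception& e) {
        std::cout << "caught: " << e.what() << "\n";
    }
    return 0;
}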

View File

@@ -468,7 +468,7 @@ std::vector<pwl_t> pwl_search(const DnnActivation& activation_type,
}
void PwlDesignOpt(const DnnActivation activation_type,
void PwlDesignOpt(const DnnActivation& activation_type,
std::vector<gna_pwl_segment_t> &ptr_segment,
const float scale_in,
const float scale_out,
@@ -582,7 +582,7 @@ void PwlDesignOpt(const DnnActivation activation_type,
}
}
void PwlDesign(const DnnActivation activation_type,
void PwlDesign(const DnnActivation& activation_type,
gna_pwl_segment_t *ptr_segment,
const uint32_t num_segments,
const float scale_in,
@@ -976,7 +976,6 @@ void PwlApply32(intel_dnn_component_t *component,
}
break;
case kActFakeQuantize: {
bool clamping = true;
double levels = transform->func_id.fqParams.levels;
for (uint32_t i = num_row_start; i <= num_row_end; i++) {
@@ -988,16 +987,9 @@ void PwlApply32(intel_dnn_component_t *component,
double output_low = transform->func_id.fqParams.output_low[outputChannel];
double output_high = transform->func_id.fqParams.output_high[outputChannel];
auto scaleInput = (levels - 1) / (input_high - input_low);
auto scaleOutput = (levels - 1) / (output_high - output_low);
for (uint32_t j = num_col_start; j <= num_col_end; j++) {
auto offset = i * num_columns + j;
auto x = ptr_in[offset];
if (!clamping) {
ptr_out[offset] = ptr_in[offset] * scaleInput / scaleOutput;
continue;
}
if (x <= std::min(input_low, input_high)) {
ptr_out[offset] = output_low;
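
In the kActFakeQuantize branch of PwlApply32, the clamping flag was always true, so the flag, the scale computations feeding the if (!clamping) path, and that path itself were dead code and are removed. A compact sketch of the surviving clamp-only behaviour; the ranges and the pass-through of in-range values are simplified for the example:

// Illustrative sketch only, not part of the commit.
#include <algorithm>
#include <iostream>
#include <vector>

// Only the clamping behaviour survives: values outside [input_low, input_high]
// saturate to the corresponding output bound; the old "!clamping" rescale path is gone.
std::vector<double> fakeQuantizeClamp(const std::vector<double>& in,
                                      double input_low, double input_high,
                                      double output_low, double output_high) {
    std::vector<double> out(in.size());
    for (std::size_t i = 0; i < in.size(); ++i) {
        const double x = in[i];
        if (x <= std::min(input_low, input_high)) {
            out[i] = output_low;
        } else if (x >= std::max(input_low, input_high)) {
            out[i] = output_high;
        } else {
            out[i] = x;   // in-range values pass through unchanged in this simplified sketch
        }
    }
    return out;
}

int main() {
    for (double v : fakeQuantizeClamp({-2.0, 0.5, 3.0}, -1.0, 1.0, -1.0, 1.0)) {
        std::cout << v << " ";    // prints "-1 0.5 1"
    }
    std::cout << "\n";
    return 0;
}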

View File

@@ -91,13 +91,13 @@ void PwlApply32(intel_dnn_component_t *component,
const uint32_t num_row_end,
const uint32_t num_col_start,
const uint32_t num_col_end);
void PwlDesign(const DnnActivation activation_type,
void PwlDesign(const DnnActivation& activation_type,
gna_pwl_segment_t *ptr_segment,
const uint32_t num_segments,
const float scale_in,
const float scale_out,
const bool low_precision);
void PwlDesignOpt(const DnnActivation activation_type,
void PwlDesignOpt(const DnnActivation& activation_type,
std::vector<gna_pwl_segment_t> &ptr_segment,
const float scale_in,
const float scale_out,