[GNA] Adding missed ifdefs (#7167)

* Added missed ifdef

* Moved ifdefs to avoid unused variables and unclosed scopes

* Moved ifdefs to lower line numbers
This commit is contained in:
Andrey Sapozhnikov 2021-10-06 14:44:09 +03:00 committed by GitHub
parent 11516bb2b2
commit 9add27fd74
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -801,8 +801,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
uint32_t num_bytes_per_bias = component[i].op.affine.num_bytes_per_bias;
float weight_scale_factor = component[i].op.affine.weight_scale_factor;
float output_scale_factor = component[i].output_scale_factor;
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
uint32_t num_weight_rows = (component[i].operation == kDnnDiagonalOp) ? 1 : num_rows_out;
uint32_t num_weight_columns = num_rows_in;
#endif
if ((compute_precision_ == kDnnInt) && (logging_precision == kDnnFloat)) {
out_file << "<num_bytes_per_weight> " << std::dec << 4 << "\n";
out_file << "<num_bytes_per_bias> " << std::dec << 4 << "\n";
@@ -823,15 +825,15 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
<< GNAPluginNS::memory::MemoryOffset(component[i].op.affine.ptr_weights, ptr_dnn_memory_) << "\n";
out_file << "<bias_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.affine.ptr_biases, ptr_dnn_memory_) << "\n";
#ifdef LIGHT_DUMP
std::ofstream out_wfile((out_file_name.str() + "_weights.txt").c_str(), std::ios::out);
std::ofstream out_bfile((out_file_name.str() + "_biases.txt").c_str(), std::ios::out);
#endif
if (num_bytes_per_weight == 1) {
if (num_bytes_per_bias != 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t* ptr_weight = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_weights);
gna_compound_bias_t* ptr_bias = reinterpret_cast<gna_compound_bias_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -847,8 +849,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t* ptr_weight = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -861,11 +863,11 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_wfile << "\n";
}
}
}
#endif
}
} else if (num_bytes_per_weight == 2) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
if (logging_precision == kDnnFloat) {
@@ -879,8 +881,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_weight = reinterpret_cast<float *>(component[i].op.affine.ptr_weights);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
for (uint32_t col = 0; col < num_weight_columns; col++) {
out_wfile << std::setprecision(5)
@@ -896,9 +898,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (num_bytes_per_weight == 1) {
if (num_bytes_per_bias != 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
gna_compound_bias_t
* ptr_biases = reinterpret_cast<gna_compound_bias_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row].bias << ", ";
@@ -909,8 +911,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t *ptr_biases = reinterpret_cast<int8_t*>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row] << "\n";
@@ -921,8 +923,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
if (logging_precision == kDnnInt) {
out_bfile << std::setw(8) << ptr_biases[row] << "\n";
@@ -932,11 +934,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_biases = reinterpret_cast<float *>(component[i].op.affine.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_rows_out; row++) {
out_bfile << std::setprecision(5) << ptr_biases[row] << "\n";
}
@@ -976,15 +976,15 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_file << "<bias_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.conv1D.ptr_biases, ptr_dnn_memory_) << "\n";
#ifdef LIGHT_DUMP
std::ofstream out_wfile((out_file_name.str() + "_weights.txt").c_str(), std::ios::out);
std::ofstream out_bfile((out_file_name.str() + "_biases.txt").c_str(), std::ios::out);
#endif
if (num_bytes_per_weight == 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int8_t *ptr_weight = reinterpret_cast<int8_t *>(component[i].op.conv1D.ptr_filters);
gna_compound_bias_t *ptr_bias = reinterpret_cast<gna_compound_bias_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
if (logging_precision == kDnnFloat) {
@@ -999,8 +999,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (num_bytes_per_weight == 2) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.conv1D.ptr_filters);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
if (logging_precision == kDnnFloat) {
@@ -1015,8 +1015,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_weight = reinterpret_cast<float *>(component[i].op.conv1D.ptr_filters);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
for (uint32_t col = 0; col < num_filter_coefficients; col++) {
out_wfile << std::setprecision(12)
@@ -1032,9 +1032,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (logging_precision == kDnnInt) {
if (num_bytes_per_weight == 1) {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< ptr_biases[row].bias << " ";
@@ -1043,8 +1043,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << "0x" << std::setfill('0') << std::setw(8) << std::hex << ptr_biases[row]
<< "\n";
@@ -1052,8 +1052,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << std::setprecision(12)
<< ptr_biases[row] / output_scale_factor << "\n";
@@ -1061,8 +1061,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
#if defined(DUMP_WB) || defined(LIGHT_DUMP)
float *ptr_biases = reinterpret_cast<float *>(component[i].op.conv1D.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_filters; row++) {
out_bfile << std::setprecision(12) << ptr_biases[row] << "\n";
}
@@ -1104,8 +1104,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
uint32_t num_vector_delay = component[i].op.recurrent.num_vector_delay;
uint32_t num_bytes_per_weight = component[i].op.recurrent.num_bytes_per_weight;
uint32_t num_bytes_per_bias = component[i].op.recurrent.num_bytes_per_bias;
#ifdef DUMP_WB
uint32_t num_weight_rows = num_columns_out;
uint32_t num_weight_columns = num_columns_in + num_columns_out;
#endif
out_file << "<num_vector_delay> " << std::dec << num_vector_delay << "\n";
if ((compute_precision_ == kDnnInt) && (logging_precision == kDnnFloat)) {
out_file << "<num_bytes_per_weight> " << std::dec << 4 << "\n";
@@ -1130,10 +1132,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
out_file << "<feedback_address> " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< GNAPluginNS::memory::MemoryOffset(component[i].op.recurrent.ptr_feedbacks, ptr_dnn_memory_) << "\n";
if (num_bytes_per_weight == 1) {
#ifdef DUMP_WB
int8_t *ptr_weight = reinterpret_cast<int8_t *>(component[i].op.recurrent.ptr_weights);
gna_compound_bias_t
*ptr_bias = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
#ifdef DUMP_WB
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1151,8 +1153,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (num_bytes_per_weight == 2) {
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.recurrent.ptr_weights);
#ifdef DUMP_WB
int16_t *ptr_weight = reinterpret_cast<int16_t *>(component[i].op.recurrent.ptr_weights);
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1168,8 +1170,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else if (compute_precision_ == kDnnFloat) {
float *ptr_weight = reinterpret_cast<float *>(component[i].op.recurrent.ptr_weights);
#ifdef DUMP_WB
float *ptr_weight = reinterpret_cast<float *>(component[i].op.recurrent.ptr_weights);
for (uint32_t row = 0; row < num_weight_rows; row++) {
out_file << "<weight_row> ";
for (uint32_t col = 0; col < num_weight_columns; col++) {
@@ -1186,10 +1188,10 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
if (compute_precision_ == kDnnInt) {
if (logging_precision == kDnnInt) {
if (num_bytes_per_weight == 1) {
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<compound_bias>" << " ";
#ifdef DUMP_WB
gna_compound_bias_t
*ptr_biases = reinterpret_cast<gna_compound_bias_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< ptr_biases[col].bias << " ";
@@ -1198,9 +1200,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
}
#endif
} else {
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << "0x" << std::setfill('0') << std::setw(8) << std::hex << ptr_biases[col]
<< " ";
@@ -1208,9 +1210,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
int32_t *ptr_biases = reinterpret_cast<int32_t *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << std::setprecision(12) << std::scientific
<< ptr_biases[col] / output_scale_factor << " ";
@@ -1218,9 +1220,9 @@ void GNAPluginNS::backend::AMIntelDNN::WriteDnnText(const char *filename, intel_
#endif
}
} else {
float *ptr_biases = reinterpret_cast<float *>(component[i].op.recurrent.ptr_biases);
out_file << "<bias>" << " ";
#ifdef DUMP_WB
float *ptr_biases = reinterpret_cast<float *>(component[i].op.recurrent.ptr_biases);
for (uint32_t col = 0; col < num_columns_out; col++) {
out_file << std::setprecision(12) << std::scientific << ptr_biases[col] << " ";
}
@@ -2168,8 +2170,8 @@ void GNAPluginNS::backend::AMIntelDNN::WriteInputAndOutputText() {
in_file << std::setw(8) << floatValue / input_scale_factor << "\n";
}
}
#endif
}
#endif
}
uint32_t GNAPluginNS::backend::AMIntelDNN::num_components() {