remove debug
This commit is contained in:
  parent 2711af22ed
  commit 574a3a04f3
@@ -1,30 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "debug_new_pass.hpp"
-
-#ifdef DEBUG_VISUALIZE
-#    include "ngraph/pass/visualize_tree.hpp" // DEBUG
-//#include "openvino/pass/serialize.hpp" // DEBUG
-#    include <sstream>
-#endif
-
-namespace intel_gna_debug {
-
-void DebugVisualize(ov::pass::Manager& manager, const std::string& name) {
-#ifdef DEBUG_VISUALIZE
-    static unsigned counter = 0;
-    std::stringstream ss;
-#    ifdef DEBUG_VISUALIZETREE
-    ss << counter << "_" << name << ".png";
-    manager.register_pass<ov::pass::VisualizeTree>(ss.str());
-#    else
-    ss << counter << "_" << name;
-    manager.register_pass<ov::pass::Serialize>(ss.str() + ".xml", ss.str() + ".bin");
-#    endif
-    ++counter;
-#endif
-}
-
-} // namespace intel_gna_debug
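With this helper gone, equivalent graph dumps can still be produced by registering the stock OpenVINO passes directly. A minimal sketch, not part of this commit; the function name, output file names, and the model variable are placeholders:

    #include <memory>
    #include <string>

    #include "openvino/core/model.hpp"
    #include "openvino/pass/manager.hpp"
    #include "openvino/pass/serialize.hpp"
    #include "openvino/pass/visualize_tree.hpp"

    // Sketch only: dump the current state of a model roughly the way the removed
    // DebugVisualize helper did, without the counter and #ifdef machinery.
    void dump_model_state(const std::shared_ptr<ov::Model>& model, const std::string& tag) {
        ov::pass::Manager manager;
        manager.register_pass<ov::pass::VisualizeTree>(tag + ".png");            // rendered graph
        manager.register_pass<ov::pass::Serialize>(tag + ".xml", tag + ".bin");  // IR snapshot
        manager.run_passes(model);
    }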
@@ -1,41 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <iostream>
-
-#undef DEBUG_USE_NEW_PASS
-#define DEBUG_USE_NEW_PASS 1
-
-#undef DEBUG_VISUALIZE
-//#define DEBUG_VISUALIZE 1
-#undef DEBUG_VISUALIZETREE
-#define DEBUG_VISUALIZETREE 1
-
-#define EMUTEX_DEBUG_CHECKPOINT std::cout << "[EMUTEX DEBUG] CHECKPOINT " << __FILE__ << ":" << __LINE__ << std::endl;
-#define EMUTEX_DEBUG_CHECKPOINT_MESSAGE(message) \
-    std::cout << "[EMUTEX DEBUG] CHECKPOINT " << __FILE__ << ":" << __LINE__ << " " << message << std::endl;
-#define EMUTEX_DEBUG_VALUE(value) \
-    std::cout << "[EMUTEX DEBUG] " << __FILE__ << ":" << __LINE__ << " " << #value << " = " << value << std::endl;
-
-#include "openvino/pass/manager.hpp"
-
-namespace intel_gna_debug {
-void DebugVisualize(ov::pass::Manager& manager, const std::string& name);
-} // namespace intel_gna_debug
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const std::vector<T>& vec) {
-    os << "{";
-    for (size_t i = 0; i < vec.size(); ++i) {
-        if (i)
-            os << ", ";
-        os << vec[i];
|
||||
}
|
||||
os << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
#define EMUTEX_DEBUG_VISUALIZE(name) intel_gna_debug::DebugVisualize(manager, name);
|
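The macros in this header were plain stream-based tracing, and each one already ends with a semicolon, so none is added at the call site. A short sketch of how they were typically used before this commit removed the header; the call site below is hypothetical:

    #include <vector>

    #include "debug_new_pass.hpp"  // the header deleted above; only applies to the pre-commit tree

    // Hypothetical call site, for illustration only.
    void trace_example(const std::vector<int>& dims) {
        EMUTEX_DEBUG_CHECKPOINT                              // prints "[EMUTEX DEBUG] CHECKPOINT file:line"
        EMUTEX_DEBUG_CHECKPOINT_MESSAGE("before transform")  // same, plus a message
        EMUTEX_DEBUG_VALUE(dims)  // prints "dims = {...}" via the vector operator<< defined in the header
    }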
@@ -6,7 +6,6 @@
 
 #include "backend/gna_types.hpp"
 #include "common/gna_target.hpp"
-#include "debug_new_pass.hpp" // DEBUG
 #include "gna_graph_tools.hpp"
 #include "weights_converter.hpp"
 
@@ -91,11 +90,7 @@ size_t LayerQuantizer::GetBiasSizeForLayer(InferenceEngine::WeightableLayer& wl)
         return wl._biases->size();
     } else if (LayerInfo(wl).isConvolution()) {
         // Calculating biases len using outdata dims: biases number should be equal to output channels number
-#ifndef DEBUG_USE_NEW_PASS
-        return InferenceEngine::GetDataDimByName(wl.outData.front(), InferenceEngine::DataDimName::C);
-#else
         return InferenceEngine::GetDataDimSizeNHWC(wl.outData.front(), InferenceEngine::DataDimName::C);
-#endif
     } else {
         // Calculating biases size using outData dimensions
         return wl.outData.front()->getDims().back();
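The retained branch reads the channel count with GetDataDimSizeNHWC rather than GetDataDimByName, which, judging by the helper names, reflects that the new pass pipeline keeps convolution outputs in NHWC order while the legacy lookup assumed NCHW; the bias count itself is just the number of output channels. An illustrative sketch of the index difference; this is not the plugin's implementation:

    #include <cstddef>
    #include <vector>

    // Illustration only: where the channel count sits in a 4D shape, depending on layout.
    size_t channel_count(const std::vector<size_t>& dims, bool nhwc) {
        // NCHW: {N, C, H, W} -> channels at index 1
        // NHWC: {N, H, W, C} -> channels in the last position
        return nhwc ? dims.back() : dims.at(1);
    }

    // e.g. channel_count({1, 8, 16, 16}, /*nhwc=*/false) == 8
    //      channel_count({1, 16, 16, 8}, /*nhwc=*/true)  == 8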
@@ -5,7 +5,6 @@
 #include "scale_factor_calc.hpp"
 
 #include "common/numerical_utils.hpp"
-#include "debug_new_pass.hpp" // DEBUG
 #include "gna_slope_scale.hpp"
 #include "gna_upstream_iterator.hpp"
 #include "layer_quantizer.hpp"
@@ -24,7 +24,6 @@
 #include "backend/gna_limitations.hpp"
 #include "caseless.hpp"
 #include "common/numerical_utils.hpp"
-#include "debug_new_pass.hpp"
 #include "descriptions/gna_desc.hpp"
 #include "frontend/layer_quantizer.hpp"
 #include "frontend/scale_factor_calc.hpp"
@@ -313,7 +312,6 @@ void GNAGraphCompiler::assertConvolutionLayoutProper(const InferenceEngine::Data
     }
 }
 
-#ifdef DEBUG_USE_NEW_PASS
 namespace {
 
 template <typename T>
@@ -327,7 +325,6 @@ PropertyVector<T> PropertyVectorAppend(PropertyVector<T> properties, T value) {
 }
 
 } // namespace
-#endif
 
 /**
  * Create AMIntelDNN Convolutional1DComponent from ConvolutionLayer
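The anonymous-namespace helper whose closing brace appears above, PropertyVectorAppend, is now compiled unconditionally. It returns a copy of a property list with one extra trailing value and is used below to promote 1D kernels, strides, and pads to 2D. Since PropertyVector's API is not shown in this diff, the sketch below uses std::vector as a stand-in:

    #include <vector>

    // Stand-in sketch of what PropertyVectorAppend does; the real helper operates
    // on InferenceEngine's PropertyVector type, not std::vector.
    template <typename T>
    std::vector<T> property_vector_append(std::vector<T> properties, T value) {
        properties.push_back(value);
        return properties;
    }

    // e.g. a 1D kernel {9} becomes {9, 1}, mirroring calls such as
    // pooling._kernel = PropertyVectorAppend<unsigned int>(pooling._kernel, 1);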
@@ -354,7 +351,6 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
     const auto outputs = layer->outData.front();
     assertConvolutionLayoutProper(inputs);
 
-#ifdef DEBUG_USE_NEW_PASS
     const auto in_batch = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::N);
     const auto in_channels = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::C);
     auto in_height = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::H);
@@ -374,18 +370,6 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
         convolution._pads_end = PropertyVectorAppend<unsigned int>(convolution._pads_end, 0);
     }
 
-#else
-    const auto in_batch = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::N);
-    const auto in_channels = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::C);
-    auto in_height = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::H);
-    auto in_width = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::W);
-
-    const auto out_batch = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::N);
-    const auto out_channels = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::C);
-    auto out_height = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::H);
-    auto out_width = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::W);
-#endif
-
     if (in_height > 1 && in_width == 1 && !ShouldUseOnlyConv2DGnaIface()) {
         std::swap(in_height, in_width);
         std::swap(out_height, out_width);
@@ -627,34 +611,28 @@ void GNAGraphCompiler::finalizeConvolution1DPrimitive(InferenceEngine::CNNLayerP
         });
     }
 
-#ifndef DEBUG_USE_NEW_PASS
-#endif
     connectOutput(layer, ptr_outputs, num_data_bytes_out);
 
     // Transpose H with W or C with HW
     auto A = transpose_h_w ? in_kernel_h : in_channels;
     auto B = transpose_h_w ? in_kernel_w : convolution._kernel[X_AXIS];
 
-    std::vector<uint8_t> transposedWeights;
+    std::vector<uint8_t> copiedWeights;
     for (uint32_t k = 0; k < num_filters; k++) {
         uint8_t* ptr_filt_current =
             convolution._weights->cbuffer().as<uint8_t*>() + k * A * B * convolution.precision.size();
-#ifdef DEBUG_USE_NEW_PASS
-        auto transposedPart = copyMatrix(ptr_filt_current, convolution.precision.size(), A, B);
-#else
-        auto transposedPart = transposeMatrix(ptr_filt_current, convolution.precision.size(), A, B);
-#endif
-        transposedWeights.insert(transposedWeights.end(), transposedPart.begin(), transposedPart.end());
+        auto copieddPart = copyMatrix(ptr_filt_current, convolution.precision.size(), A, B);
+        copiedWeights.insert(copiedWeights.end(), copieddPart.begin(), copieddPart.end());
     }
-    if (transposedWeights.size() != convolution._weights->byteSize()) {
-        THROW_GNA_LAYER_EXCEPTION(&convolution) << "weights was transposed incorrectly. " << transposedWeights.size()
+    if (copiedWeights.size() != convolution._weights->byteSize()) {
+        THROW_GNA_LAYER_EXCEPTION(&convolution) << "weights was transposed incorrectly. " << copiedWeights.size()
                                                 << ' ' << convolution._weights->byteSize();
     }
 
     if (num_conv_kernel_padding == 0) {
         gnamem->getQueue(REGION_RO)->push_local_ptr(layer,
                                                     ptr_weights,
-                                                    transposedWeights.data(),
+                                                    copiedWeights.data(),
                                                     convolution._weights->byteSize());
     } else {
         auto paddedWeights = num_filter_coefficients * num_filters;
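The renames in this hunk (transposedWeights to copiedWeights, transposeMatrix to copyMatrix) keep only the copying behaviour, so the buffer name now matches what it holds. A sketch of the difference between the two per-kernel operations; the signatures are assumed from the call site (pointer, element size in bytes, rows, columns) and are not the plugin's actual helpers:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Assumed shapes of the two helpers, based only on how they are called above.
    std::vector<uint8_t> copy_matrix(const uint8_t* src, size_t elem_size, uint32_t rows, uint32_t cols) {
        // Plain byte-for-byte copy of a rows x cols block.
        return std::vector<uint8_t>(src, src + elem_size * rows * cols);
    }

    std::vector<uint8_t> transpose_matrix(const uint8_t* src, size_t elem_size, uint32_t rows, uint32_t cols) {
        std::vector<uint8_t> out(elem_size * rows * cols);
        for (uint32_t r = 0; r < rows; ++r)
            for (uint32_t c = 0; c < cols; ++c)
                // element (r, c) of the source lands at (c, r) of the destination
                std::copy_n(src + (r * cols + c) * elem_size,
                            elem_size,
                            out.data() + (c * rows + r) * elem_size);
        return out;
    }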
@@ -666,7 +644,7 @@ void GNAGraphCompiler::finalizeConvolution1DPrimitive(InferenceEngine::CNNLayerP
                             layerName,
                             num_conv_kernel_padding,
                             cpSize,
-                            transposedWeights,
+                            copiedWeights,
                             num_filters,
                             single_conv_kernel_size](void* data, std::size_t size) {
             if (paddedWeightsSize > size) {
@@ -678,7 +656,7 @@ void GNAGraphCompiler::finalizeConvolution1DPrimitive(InferenceEngine::CNNLayerP
             for (int i = 0; i < num_filters; i++) {
                 ie_memcpy(dstPtr + offset,
                           size - offset,
-                          transposedWeights.data() + single_conv_kernel_size * i * cpSize,
+                          copiedWeights.data() + single_conv_kernel_size * i * cpSize,
                           single_conv_kernel_size * cpSize);
                 offset += single_conv_kernel_size * cpSize;
                 ie_memcpy(dstPtr + offset, size - offset, &padding_zeros[0], padding_zeros.size());
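When the kernels need padding, the lambda above writes the weights filter by filter: each filter's kernel bytes followed by zero padding. A simplified sketch of that layout; the names are stand-ins for the captured variables, and the per-filter padding size is assumed to be padding_zeros.size() bytes:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Simplified version of the padded-weights copy performed by the lambda above.
    void fill_padded_weights(uint8_t* dst,
                             const std::vector<uint8_t>& kernels,  // copiedWeights
                             size_t kernel_elems,                  // single_conv_kernel_size
                             size_t elem_size,                     // cpSize
                             size_t padding_bytes,                 // padding_zeros.size() (assumed)
                             size_t num_filters) {
        size_t offset = 0;
        for (size_t i = 0; i < num_filters; ++i) {
            // kernel bytes for filter i ...
            std::memcpy(dst + offset, kernels.data() + i * kernel_elems * elem_size, kernel_elems * elem_size);
            offset += kernel_elems * elem_size;
            // ... followed by the zero padding
            std::memset(dst + offset, 0, padding_bytes);
            offset += padding_bytes;
        }
    }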
@@ -1010,7 +988,6 @@ void GNAGraphCompiler::PoolingPrimitive(InferenceEngine::CNNLayerPtr layer) {
     auto inputs = layer->insData.begin()->lock();
     auto outputs = *layer->outData.begin();
 
-#ifdef DEBUG_USE_NEW_PASS
     uint32_t w_dim_in = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::W);
     uint32_t h_dim_in = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::H);
     const uint32_t c_dim_in = GetDataDimSizeNHWC(inputs, InferenceEngine::DataDimName::C);
@@ -1024,15 +1001,6 @@ void GNAGraphCompiler::PoolingPrimitive(InferenceEngine::CNNLayerPtr layer) {
         pooling._kernel = PropertyVectorAppend<unsigned int>(pooling._kernel, 1);
         pooling._stride = PropertyVectorAppend<unsigned int>(pooling._stride, 1);
     }
-#else
-    uint32_t w_dim_in = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::W);
-    uint32_t h_dim_in = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::H);
-    const uint32_t c_dim_in = InferenceEngine::GetDataDimByName(inputs, InferenceEngine::DataDimName::C);
-
-    uint32_t w_dim_out = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::W);
-    uint32_t h_dim_out = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::H);
-    const uint32_t c_dim_out = InferenceEngine::GetDataDimByName(outputs, InferenceEngine::DataDimName::C);
-#endif
 
     void* ptr_inputs = nullptr;
     void* ptr_outputs = nullptr;
@@ -4,7 +4,6 @@
 
 #include "gna_transformations_pipeline.hpp"
 
-#include "debug_new_pass.hpp"
 #include "gna_itt.hpp"
 #include "legacy/net_pass.h"
 #include "legacy/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp"
@@ -8,7 +8,6 @@
 #include <transformations/utils/utils.hpp>
 #include <utility>
 
-#include "../debug_new_pass.hpp"
 #include "openvino/opsets/opset9.hpp"
 #include "openvino/pass/pattern/op/wrap_type.hpp"
 #include "transformations/rt_info/gather_sinking_attr.hpp"
@@ -6,7 +6,6 @@
 
 #include <openvino/cc/ngraph/itt.hpp>
 
-#include "../debug_new_pass.hpp"
 #include "backend/gna_limitations.hpp"
 #include "common/graph_utils.hpp"
 #include "openvino/core/rt_info.hpp"