diff --git a/src/plugins/intel_gna/src/backend/am_intel_dnn.cpp b/src/plugins/intel_gna/src/backend/am_intel_dnn.cpp
index ff41c584a31..93ed69d62a3 100644
--- a/src/plugins/intel_gna/src/backend/am_intel_dnn.cpp
+++ b/src/plugins/intel_gna/src/backend/am_intel_dnn.cpp
@@ -45,18 +45,18 @@
  */
 #define LIGHT_DUMP

-using gna_convolution_layer::outputFromConv;
-using gna_convolution_layer::outputFromPooling;
+using ov::intel_gna::gna_convolution_layer::outputFromConv;
+using ov::intel_gna::gna_convolution_layer::outputFromPooling;

 namespace ov {
 namespace intel_gna {
 namespace backend {

-void backend::AMIntelDNN::BeginNewWrite(uint32_t index) {
+void AMIntelDNN::BeginNewWrite(uint32_t index) {
     dump_write_index = index;
 }

-void backend::AMIntelDNN::Init(memory::GNAMemoryInterface* memoryInterface,
+void AMIntelDNN::Init(memory::GNAMemoryInterface* memoryInterface,
                                intel_dnn_number_type_t compute_precision,
                                float scale_factor) {
     memory = memoryInterface;
@@ -67,11 +67,11 @@ void backend::AMIntelDNN::Init(memory::GNAMemoryInterface* memoryInterface,
     num_active_outputs_ = 0;
 }

-backend::AMIntelDNN::~AMIntelDNN() {
+AMIntelDNN::~AMIntelDNN() {
     component.clear();
 }

-void backend::AMIntelDNN::InitActiveList(uint32_t *ptr_active_list) {
+void AMIntelDNN::InitActiveList(uint32_t *ptr_active_list) {
     ptr_active_outputs_ = ptr_active_list;
     if (ptr_active_list == nullptr) {
         if (component[component.size() - 1].orientation_out == kDnnInterleavedOrientation) {
@@ -85,7 +85,7 @@ void backend::AMIntelDNN::InitActiveList(uint32_t *ptr_active_list) {
     }
 }

-void backend::AMIntelDNN::InitAffineComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitAffineComponentPrivate(intel_dnn_component_t &comp,
                                                      uint32_t num_rows_in,
                                                      uint32_t num_columns,
                                                      uint32_t num_rows_out,
@@ -129,7 +129,7 @@ void backend::AMIntelDNN::InitAffineComponentPrivate(intel_dnn_component_t &comp
 }

-void backend::AMIntelDNN::InitConvolutional1DComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitConvolutional1DComponentPrivate(intel_dnn_component_t &comp,
                                                               uint32_t num_columns_in,
                                                               uint32_t num_columns_out,
                                                               uint32_t num_bytes_per_input,
@@ -193,7 +193,7 @@ void backend::AMIntelDNN::InitConvolutional1DComponentPrivate(intel_dnn_componen
     }
 }

-void backend::AMIntelDNN::InitConvolutional2DComponentPrivate(intel_dnn_component_t& comp,
+void AMIntelDNN::InitConvolutional2DComponentPrivate(intel_dnn_component_t& comp,
                                                               OvGnaTensor inputTensor,
                                                               OvGnaTensor outputTensor,
                                                               OvGnaTensor filterTensor,
@@ -228,7 +228,7 @@ void backend::AMIntelDNN::InitConvolutional2DComponentPrivate(intel_dnn_componen
     ptr_outputs = &comp.ptr_outputs;
 }

-bool backend::AMIntelDNN::isOperationCnnLegacySpecific(const Gna2Operation& op) {
+bool AMIntelDNN::isOperationCnnLegacySpecific(const Gna2Operation& op) {
     // GNA compile target GNA_TARGET_3_0 does not support pooling window < pooling stride
     return op.Type == Gna2OperationTypeConvolution &&
         op.NumberOfParameters > std::max(PoolStrideParamIdx, PoolWinParamIdx) &&
@@ -238,7 +238,7 @@ bool backend::AMIntelDNN::isOperationCnnLegacySpecific(const Gna2Operation& op)
         static_cast<Gna2Shape*>(op.Parameters[PoolStrideParamIdx])->Dimensions[0] > static_cast<Gna2Shape*>(op.Parameters[PoolWinParamIdx])->Dimensions[0];
 }

-void backend::AMIntelDNN::updateNumberOfOutputsIfPoolingEnabled(Gna2Model& gnaModel, bool useLegacyFormula) {
+void AMIntelDNN::updateNumberOfOutputsIfPoolingEnabled(Gna2Model& gnaModel, bool useLegacyFormula) {
     IE_ASSERT(gnaModel.Operations != nullptr || gnaModel.NumberOfOperations == 0);
     for (uint32_t i = 0; i < gnaModel.NumberOfOperations; i++) {
         auto& gnaOp = gnaModel.Operations[i];
@@ -271,7 +271,7 @@ void backend::AMIntelDNN::updateNumberOfOutputsIfPoolingEnabled(Gna2Model& gnaMo
     }
 }

-void backend::AMIntelDNN::InitMaxpoolComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitMaxpoolComponentPrivate(intel_dnn_component_t &comp,
                                                       std::array<uint32_t, 3> inCHW,
                                                       std::array<uint32_t, 3> outCHW,
                                                       uint32_t num_bytes_per_input,
@@ -302,7 +302,7 @@ void backend::AMIntelDNN::InitMaxpoolComponentPrivate(intel_dnn_component_t &com
     }
 }

-void backend::AMIntelDNN::InitCopyComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitCopyComponentPrivate(intel_dnn_component_t &comp,
                                                    intel_dnn_orientation_t orientation,
                                                    uint32_t num_rows_in,
                                                    uint32_t num_columns_in,
@@ -341,7 +341,7 @@ void backend::AMIntelDNN::InitCopyComponentPrivate(intel_dnn_component_t &comp,
     }
 }

-void backend::AMIntelDNN::InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitPiecewiseLinearComponentPrivate(intel_dnn_component_t &comp,
                                                               const DnnActivation& function_id,
                                                               intel_dnn_orientation_t orientation,
                                                               uint32_t num_rows,
@@ -383,7 +383,7 @@ void backend::AMIntelDNN::InitPiecewiseLinearComponentPrivate(intel_dnn_componen
     }
 }

-void backend::AMIntelDNN::InitInterleaveComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitInterleaveComponentPrivate(intel_dnn_component_t &comp,
                                                          uint32_t num_rows_in,
                                                          uint32_t num_columns_in,
                                                          uint32_t num_bytes_per_input,
@@ -412,7 +412,7 @@ void backend::AMIntelDNN::InitInterleaveComponentPrivate(intel_dnn_component_t &
     }
 }

-void backend::AMIntelDNN::InitDeinterleaveComponentPrivate(intel_dnn_component_t &comp,
+void AMIntelDNN::InitDeinterleaveComponentPrivate(intel_dnn_component_t &comp,
                                                            uint32_t num_rows_in,
                                                            uint32_t num_columns_in,
                                                            uint32_t num_bytes_per_input,
@@ -441,7 +441,7 @@ void backend::AMIntelDNN::InitDeinterleaveComponentPrivate(intel_dnn_component_t
     }
 }

-float backend::AMIntelDNN::OutputScaleFactor(intel_dnn_component_t &comp) {
+float AMIntelDNN::OutputScaleFactor(intel_dnn_component_t &comp) {
     return comp.output_scale_factor;
 }

@@ -453,7 +453,7 @@ struct InputEndPoint {
     InputEndPoint(int nidx, size_t sz, size_t esize) : idx(nidx), size(sz), num_bytes_per_output(esize) {}
 };

-void backend::AMIntelDNN::WriteGraphWizModel(const char *filename) {
+void AMIntelDNN::WriteGraphWizModel(const char *filename) {
     auto & components = component;

 #define IS_AFFINE(k)\
@@ -720,7 +720,7 @@ void PrintTensors(std::ofstream& out, T tensors) {
     }
 }

-void backend::AMIntelDNN::PrintOffset(std::ofstream& out, const std::string& type, void* ptr) {
+void AMIntelDNN::PrintOffset(std::ofstream& out, const std::string& type, void* ptr) {
     const auto queue = memory->getQueue(ptr);
     std::string typeOfRegion = "UNKNOWN_QUEUE";
     auto offset = std::numeric_limits<uint32_t>::max();
@@ -733,9 +733,9 @@ void backend::AMIntelDNN::PrintOffset(std::ofstream& out, const std::string& typ
         << "0x" << std::setfill('0') << std::setw(8) << std::hex << offset << "\n";
 }

-void backend::AMIntelDNN::WriteDnnText(const char *filename, intel_dnn_number_type_t logging_precision) {
+void AMIntelDNN::WriteDnnText(const char *filename, intel_dnn_number_type_t logging_precision) {
     if ((compute_precision_ == kDnnFloat) && (logging_precision == kDnnInt)) {
-        fprintf(stderr, "Error trying to write floating point DNN as integer in backend::AMIntelDNN::WriteDnnText().\n");
+        fprintf(stderr, "Error trying to write floating point DNN as integer in AMIntelDNN::WriteDnnText().\n");
         fprintf(stderr, "  Please convert to integer first.\n");
first.\n"); throw -1; } @@ -1358,7 +1358,7 @@ void backend::AMIntelDNN::WriteDnnText(const char *filename, intel_dnn_number_ty } } -uint32_t backend::AMIntelDNN::CountLayers() { +uint32_t AMIntelDNN::CountLayers() { uint32_t n = 0; for (auto && c : component) { if (c.operation == kDnnAffineOp @@ -1376,7 +1376,7 @@ uint32_t backend::AMIntelDNN::CountLayers() { return n; } -void backend::AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel, const std::string& gnaCompileTarget) { +void AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel, const std::string& gnaCompileTarget) { Gna2Operation * gnaOperation; if (gnaModel == nullptr) THROW_GNA_EXCEPTION << "Invalid input parameter"; @@ -1384,12 +1384,12 @@ void backend::AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel, const std::string& THROW_GNA_EXCEPTION << "InitGNAStruct can't work on preallocated layers array"; if (component.empty()) - THROW_GNA_EXCEPTION << "empty model in backend::AMIntelDNN::InitGNAStruct()"; + THROW_GNA_EXCEPTION << "empty model in AMIntelDNN::InitGNAStruct()"; gnaModel->NumberOfOperations = CountLayers(); gnaModel->Operations = reinterpret_cast(gnaUserAllocator(gnaModel->NumberOfOperations * sizeof(Gna2Operation))); if (gnaModel->Operations == nullptr) - THROW_GNA_EXCEPTION << "out of memory in backend::AMIntelDNN::InitGNAStruct()"; + THROW_GNA_EXCEPTION << "out of memory in AMIntelDNN::InitGNAStruct()"; memset(gnaModel->Operations, 0, gnaModel->NumberOfOperations * sizeof(Gna2Operation)); gnaOperation = gnaModel->Operations; for (int i = 0; i < component.size(); i++) { @@ -1641,7 +1641,7 @@ void backend::AMIntelDNN::InitGNAStruct(Gna2Model *gnaModel, const std::string& gnaModel->NumberOfOperations = static_cast(std::distance(gnaModel->Operations, gnaOperation)); } -void backend::AMIntelDNN::DestroyGNAStruct(Gna2Model *gnaModel) { +void AMIntelDNN::DestroyGNAStruct(Gna2Model *gnaModel) { if (gnaModel->Operations != nullptr) { for (uint32_t i = 0; i < gnaModel->NumberOfOperations; i++) { switch (gnaModel->Operations[i].Type) { @@ -1661,7 +1661,7 @@ void backend::AMIntelDNN::DestroyGNAStruct(Gna2Model *gnaModel) { gnaModel->NumberOfOperations = 0; } -void backend::AMIntelDNN::WriteInputAndOutputTextGNA(const Gna2Model & model) { +void AMIntelDNN::WriteInputAndOutputTextGNA(const Gna2Model & model) { #ifdef LIGHT_DUMP dump::WriteInputAndOutputTextGNAImpl( model, @@ -1670,7 +1670,7 @@ void backend::AMIntelDNN::WriteInputAndOutputTextGNA(const Gna2Model & model) { #endif } -void backend::AMIntelDNN::WriteInputAndOutputText() { +void AMIntelDNN::WriteInputAndOutputText() { #ifdef LIGHT_DUMP for (uint32_t i = 0; i < num_components(); i++) { std::stringstream out_file_name; @@ -1766,11 +1766,11 @@ void backend::AMIntelDNN::WriteInputAndOutputText() { #endif } -uint32_t backend::AMIntelDNN::num_components() { +uint32_t AMIntelDNN::num_components() { return static_cast(component.size()); } -uint32_t backend::AMIntelDNN::num_gna_layers() { +uint32_t AMIntelDNN::num_gna_layers() { uint32_t num_layers = 0; std::set gna_layers({ kDnnAffineOp, kDnnDiagonalOp, @@ -1787,27 +1787,27 @@ uint32_t backend::AMIntelDNN::num_gna_layers() { return num_layers; } -uint32_t backend::AMIntelDNN::num_group_in() { +uint32_t AMIntelDNN::num_group_in() { return ((!component.empty()) ? ((component[0].orientation_in == kDnnInterleavedOrientation) ? component[0].num_columns_in : component[0].num_rows_in) : 0); } -uint32_t backend::AMIntelDNN::num_group_out() { +uint32_t AMIntelDNN::num_group_out() { return ((!component.empty()) ? 
             ((component[component.size() - 1].orientation_out == kDnnInterleavedOrientation) ?
              component[component.size() - 1].num_columns_out : component[component.size() - 1].num_rows_out) : 0);
 }

-uint32_t backend::AMIntelDNN::num_inputs() {
+uint32_t AMIntelDNN::num_inputs() {
     return component.empty() ? 0 : component[0].num_rows_in;
 }

-uint32_t backend::AMIntelDNN::num_outputs() {
+uint32_t AMIntelDNN::num_outputs() {
     return (component[component.size() - 1].orientation_out == kDnnInterleavedOrientation) ? component[
         component.size() - 1].num_rows_out : component[component.size() - 1].num_columns_out;
 }

-std::string backend::AMIntelDNN::getDumpFilePrefix(const std::string& folder) {
+std::string AMIntelDNN::getDumpFilePrefix(const std::string& folder) {
     const char pathSeparator =
 #ifdef _WIN32
         '\\';
@@ -1817,15 +1817,15 @@ std::string backend::AMIntelDNN::getDumpFilePrefix(const std::string& folder) {
     return std::string(".") + pathSeparator + folder + pathSeparator + std::to_string(dump_write_index) + pathSeparator;
 }

-std::string backend::AMIntelDNN::getDumpFilePrefixGNA() {
+std::string AMIntelDNN::getDumpFilePrefixGNA() {
     return getDumpFilePrefix("gna_layers");
 }

-std::string backend::AMIntelDNN::getDumpFolderName() {
+std::string AMIntelDNN::getDumpFolderName() {
     return getDumpFilePrefix("layers");
 }

-std::string backend::AMIntelDNN::getRefFolderName() {
+std::string AMIntelDNN::getRefFolderName() {
     return getDumpFilePrefix("ref_layers");
 }
diff --git a/src/plugins/intel_gna/src/gna_graph_compiler.cpp b/src/plugins/intel_gna/src/gna_graph_compiler.cpp
index b7376d5eb62..666abd1d420 100644
--- a/src/plugins/intel_gna/src/gna_graph_compiler.cpp
+++ b/src/plugins/intel_gna/src/gna_graph_compiler.cpp
@@ -41,11 +41,13 @@
 #include "ops/pwl.hpp"

 using namespace InferenceEngine;
-using namespace std;
-using namespace ov::intel_gna;
 using namespace ov::intel_gna::frontend;
 using namespace ov::intel_gna::common;
-using namespace memory;
+using namespace ov::intel_gna::memory;
+using namespace std;
+
+namespace ov {
+namespace intel_gna {

 static bool CheckIFLastComponentIsPrecededByConv2D(const backend::DnnComponents::storage_type& components,
                                                    bool verify_with_pooling = true) {
@@ -2688,3 +2690,6 @@ GNAGraphCompiler::transposeMatrix(uint8_t* ptr_matrix, size_t element_size, uint
     }
     return temp_buffer;
 }
+
+} // namespace intel_gna
+} // namespace ov
diff --git a/src/plugins/intel_gna/src/serial/headers/2dot2/gna_model_header.hpp b/src/plugins/intel_gna/src/serial/headers/2dot2/gna_model_header.hpp
index 7ae9926b2ff..e063e694745 100644
--- a/src/plugins/intel_gna/src/serial/headers/2dot2/gna_model_header.hpp
+++ b/src/plugins/intel_gna/src/serial/headers/2dot2/gna_model_header.hpp
@@ -13,7 +13,7 @@
 namespace ov {
 namespace intel_gna {
-namespace heaser_2_dot_2 {
+namespace header_2_dot_2 {

 /**
  * @brief Header version 2.2
@@ -119,6 +119,6 @@ struct RuntimeEndPoint {
           orientation(orientation) {}
 };

-} // namespace heaser_2_dot_2
+} // namespace header_2_dot_2
 } // namespace intel_gna
 } // namespace ov
diff --git a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
index 280d44b416f..6dca085540f 100644
--- a/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
+++ b/src/plugins/intel_gna/tests/unit/backend/gna_limitations_test.cpp
@@ -13,7 +13,7 @@
 using namespace ov::intel_gna::limitations;
 using ov::intel_gna::common::kGnaTarget3_0;
 using ov::intel_gna::common::kGnaTarget3_5;

-struct GNAcnn2dValidatorTestParam {
+struct GNACnn2DValidatorTestParam {
     std::string target;
     std::string whatInvalid;
     std::vector<uint32_t> invalid;
@@ -49,167 +49,167 @@ const std::vector<uint32_t> kInvaliddH_35 = {0, 2, 2049};
 const std::vector<uint32_t> kInvaliddW_30 = {0, 2, 400};
 const std::vector<uint32_t> kInvaliddW_35 = {0, 2, 2049};

-const GNAcnn2dValidatorTestParam target_30 {
+const GNACnn2DValidatorTestParam target_30 {
     kGnaTarget3_0,
     "inH",
     kInvalidH_30,
 };

-const GNAcnn2dValidatorTestParam target_35 {
+const GNACnn2DValidatorTestParam target_35 {
     kGnaTarget3_5,
     "inH",
     kInvalidH_35,
 };

-const GNAcnn2dValidatorTestParam target_30_inW{
+const GNACnn2DValidatorTestParam target_30_inW{
     kGnaTarget3_0,
     "inW",
     kInvalidW_30,
 };

-const GNAcnn2dValidatorTestParam target_35_inW{
+const GNACnn2DValidatorTestParam target_35_inW{
     kGnaTarget3_5,
     "inW",
     kInvalidW_35,
 };

-const GNAcnn2dValidatorTestParam target_30_inC{
+const GNACnn2DValidatorTestParam target_30_inC{
     kGnaTarget3_0,
     "inC",
     kInvalidC_30,
 };

-const GNAcnn2dValidatorTestParam target_35_inC{
+const GNACnn2DValidatorTestParam target_35_inC{
     kGnaTarget3_5,
     "inC",
     kInvalidC_35,
 };

-const GNAcnn2dValidatorTestParam target_30_kH{
+const GNACnn2DValidatorTestParam target_30_kH{
     kGnaTarget3_0,
     "kH",
     kInvalidkH_30,
 };

-const GNAcnn2dValidatorTestParam target_35_kH{
+const GNACnn2DValidatorTestParam target_35_kH{
     kGnaTarget3_5,
     "kH",
     kInvalidkH_35,
 };

-const GNAcnn2dValidatorTestParam target_30_kW{
+const GNACnn2DValidatorTestParam target_30_kW{
     kGnaTarget3_0,
     "kW",
     kInvalidkW_30,
 };

-const GNAcnn2dValidatorTestParam target_35_kW{
+const GNACnn2DValidatorTestParam target_35_kW{
     kGnaTarget3_5,
     "kW",
     kInvalidkW_35,
 };

-const GNAcnn2dValidatorTestParam target_30_kN{
+const GNACnn2DValidatorTestParam target_30_kN{
     kGnaTarget3_0,
     "inC",
     kInvalidkN_30,
 };

-const GNAcnn2dValidatorTestParam target_35_kN{
+const GNACnn2DValidatorTestParam target_35_kN{
     kGnaTarget3_5,
     "inC",
     kInvalidkN_35,
 };

-const GNAcnn2dValidatorTestParam target_30_sH{
+const GNACnn2DValidatorTestParam target_30_sH{
     kGnaTarget3_0,
     "sH",
     kInvalidsH_30,
 };

-const GNAcnn2dValidatorTestParam target_35_sH{
+const GNACnn2DValidatorTestParam target_35_sH{
     kGnaTarget3_5,
     "sH",
     kInvalidsH_35,
 };

-const GNAcnn2dValidatorTestParam target_30_sW{
+const GNACnn2DValidatorTestParam target_30_sW{
     kGnaTarget3_0,
     "sW",
     kInvalidsW_30,
 };

-const GNAcnn2dValidatorTestParam target_35_sW{
+const GNACnn2DValidatorTestParam target_35_sW{
     kGnaTarget3_5,
     "sW",
     kInvalidsW_35,
 };

-const GNAcnn2dValidatorTestParam target_30_dH{
+const GNACnn2DValidatorTestParam target_30_dH{
     kGnaTarget3_0,
     "dH",
     kInvaliddH_30,
 };

-const GNAcnn2dValidatorTestParam target_35_dH{
+const GNACnn2DValidatorTestParam target_35_dH{
     kGnaTarget3_5,
     "dH",
     kInvaliddH_35,
 };

-const GNAcnn2dValidatorTestParam target_30_dW{
+const GNACnn2DValidatorTestParam target_30_dW{
     kGnaTarget3_0,
     "dW",
     kInvaliddW_30,
 };

-const GNAcnn2dValidatorTestParam target_35_dW{
+const GNACnn2DValidatorTestParam target_35_dW{
     kGnaTarget3_5,
     "dW",
     kInvaliddW_35,
 };

 const std::vector<uint32_t> kInvalidpw_30 = {0, 2, 10};

-const GNAcnn2dValidatorTestParam target_30_pwH{
+const GNACnn2DValidatorTestParam target_30_pwH{
     kGnaTarget3_0,
     "windowH",
     kInvalidpw_30,
 };

-const GNAcnn2dValidatorTestParam target_30_pwW{
+const GNACnn2DValidatorTestParam target_30_pwW{
     kGnaTarget3_0,
     "windowW",
     kInvalidpw_30,
 };

 const std::vector<uint32_t> kInvalidps_30 = {0, 4, 10};

-const GNAcnn2dValidatorTestParam target_30_psH{
+const GNACnn2DValidatorTestParam target_30_psH{
     kGnaTarget3_0,
     "strideH",
     kInvalidps_30,
 };

-const GNAcnn2dValidatorTestParam target_30_psW{
+const GNACnn2DValidatorTestParam target_30_psW{
     kGnaTarget3_0,
     "strideW",
     kInvalidps_30,
 };

 const std::vector<uint32_t> kInvalidPoolingRange35 = {0, 256};

-const GNAcnn2dValidatorTestParam target_35_pwH{
+const GNACnn2DValidatorTestParam target_35_pwH{
     kGnaTarget3_5,
     "windowH",
     kInvalidPoolingRange35,
 };

-const GNAcnn2dValidatorTestParam target_35_pwW{
+const GNACnn2DValidatorTestParam target_35_pwW{
     kGnaTarget3_5,
     "windowW",
     kInvalidPoolingRange35,
 };

-const GNAcnn2dValidatorTestParam target_35_psH{
+const GNACnn2DValidatorTestParam target_35_psH{
     kGnaTarget3_5,
     "strideH",
     kInvalidPoolingRange35,
 };

-const GNAcnn2dValidatorTestParam target_35_psW{
+const GNACnn2DValidatorTestParam target_35_psW{
     kGnaTarget3_5,
     "strideW",
     kInvalidPoolingRange35,
@@ -279,7 +279,7 @@ struct Validatecnn2dParams {
     }
 };

-class GNAcnn2dValidatorTest : public ::testing::TestWithParam<GNAcnn2dValidatorTestParam> {
+class GNAcnn2dValidatorTest : public ::testing::TestWithParam<GNACnn2DValidatorTestParam> {
 protected:
     void SetUp() override {
         validator = cnn2d::AbstractValidator::Create(GetParam().target);
diff --git a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp
index 9c948b2f1c2..90c70c77d2f 100644
--- a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp
+++ b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp
@@ -11,8 +11,6 @@

 namespace {

-using namespace ov::intel_gna;
-
 using GetAlignedSplitSizesData = std::tuple<
     uint32_t, // total size
     uint32_t, // maximum split size
@@ -29,7 +27,8 @@ const std::vector<GetAlignedSplitSizesData> data = {

 TEST(GetAlignedSplitSizesTest, testAlignedSplitSizes) {
     for (const auto &dataItem : data) {
-        auto sizes = GetAlignedSplitSizes(std::get<0>(dataItem), std::get<1>(dataItem),
+        auto sizes =
+            ov::intel_gna::GetAlignedSplitSizes(std::get<0>(dataItem), std::get<1>(dataItem),
             std::get<2>(dataItem));
         ASSERT_EQ(sizes, std::get<3>(dataItem));
     }
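A note for readers skimming the patch: nearly every hunk in am_intel_dnn.cpp drops a redundant backend:: qualifier from member definitions that already live inside namespace ov::intel_gna::backend, and gna_graph_compiler.cpp is wrapped in ov::intel_gna so its unqualified names resolve the same way. The sketch below is a minimal, self-contained illustration of why the two spellings are equivalent; the class body is hypothetical except for BeginNewWrite/dump_write_index, which mirror the diff:

#include <cstdint>

namespace ov {
namespace intel_gna {
namespace backend {

struct AMIntelDNN {  // trimmed stand-in for the real class
    void BeginNewWrite(uint32_t index);
    uint32_t dump_write_index = 0;
};

// Before the patch this definition was spelled
//     void backend::AMIntelDNN::BeginNewWrite(uint32_t index) { ... }
// Inside `namespace backend`, looking up `backend` already finds this
// namespace, so the qualifier is redundant and the patch drops it.
void AMIntelDNN::BeginNewWrite(uint32_t index) {
    dump_write_index = index;
}

}  // namespace backend
}  // namespace intel_gna
}  // namespace ov

int main() {
    ov::intel_gna::backend::AMIntelDNN dnn;
    dnn.BeginNewWrite(7);
    return dnn.dump_write_index == 7 ? 0 : 1;  // exits 0: both spellings define the same member
}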
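The check kept (but not changed) in isOperationCnnLegacySpecific() encodes a hardware constraint worth spelling out: on compile target GNA_TARGET_3_0, a pooling stride larger than the pooling window is not supported, so such convolutions are treated as "legacy CNN specific". The sketch below restates that predicate with simplified stand-ins: the Shape struct and parameter indices are hypothetical substitutes for the Gna2 API types and plugin constants, and the real function additionally checks the operation type on the Gna2Operation itself:

#include <algorithm>
#include <cstdint>
#include <iostream>

// Simplified stand-in for Gna2Shape: only the first dimension matters here.
struct Shape {
    uint32_t Dimensions[1];
};

constexpr uint32_t PoolWinParamIdx = 0;     // hypothetical indices; the real values
constexpr uint32_t PoolStrideParamIdx = 1;  // come from the plugin's parameter layout

// An operation is "legacy CNN specific" when its pooling stride exceeds its
// pooling window, which GNA_TARGET_3_0 cannot express directly.
bool isLegacyCnnPooling(const Shape* params[], uint32_t numParams) {
    return numParams > std::max(PoolStrideParamIdx, PoolWinParamIdx) &&
           params[PoolWinParamIdx] != nullptr && params[PoolStrideParamIdx] != nullptr &&
           params[PoolStrideParamIdx]->Dimensions[0] > params[PoolWinParamIdx]->Dimensions[0];
}

int main() {
    Shape window{{2}};
    Shape stride{{3}};  // window 2 < stride 3: legacy-specific on GNA_TARGET_3_0
    const Shape* params[] = {&window, &stride};
    std::cout << std::boolalpha << isLegacyCnnPooling(params, 2) << "\n";  // prints "true"
    return 0;
}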