diff --git a/cmake/developer_package/clang_format/clang_format.cmake b/cmake/developer_package/clang_format/clang_format.cmake index 24679f742b8..d75a4535932 100644 --- a/cmake/developer_package/clang_format/clang_format.cmake +++ b/cmake/developer_package/clang_format/clang_format.cmake @@ -3,23 +3,23 @@ # if(ENABLE_CLANG_FORMAT) - set(clang_format_required_version 9) - set(CLANG_FORMAT_FILENAME clang-format-${clang_format_required_version} clang-format) + set(CLANG_FORMAT_REQUIRED_VERSION 9 CACHE STRING "Clang-format version to use") + set(CLANG_FORMAT_FILENAME clang-format-${CLANG_FORMAT_REQUIRED_VERSION} clang-format) find_host_program(CLANG_FORMAT NAMES ${CLANG_FORMAT_FILENAME} PATHS ENV PATH) if(CLANG_FORMAT) execute_process(COMMAND ${CLANG_FORMAT} ${CMAKE_CURRENT_SOURCE_DIR} ARGS --version OUTPUT_VARIABLE CLANG_VERSION) if(NOT CLANG_VERSION) - message(WARNING "Supported clang-format version is ${clang_format_required_version}!") + message(WARNING "Supported clang-format version is ${CLANG_FORMAT_REQUIRED_VERSION}!") set(ENABLE_CLANG_FORMAT OFF) else() string(REGEX REPLACE "[^0-9]+([0-9]+)\\..*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION}) - if(NOT CLANG_FORMAT_MAJOR_VERSION EQUAL clang_format_required_version) + if(NOT CLANG_FORMAT_MAJOR_VERSION EQUAL CLANG_FORMAT_REQUIRED_VERSION) message(WARNING "Supported clang-format version is 9! 
Provided version ${CLANG_FORMAT_MAJOR_VERSION}") set(ENABLE_CLANG_FORMAT OFF) endif() endif() else() - message(WARNING "Supported clang-format-${clang_format_required_version} is not found!") + message(WARNING "Supported clang-format-${CLANG_FORMAT_REQUIRED_VERSION} is not found!") set(ENABLE_CLANG_FORMAT OFF) endif() endif() diff --git a/cmake/developer_package/compile_flags/sanitizer.cmake b/cmake/developer_package/compile_flags/sanitizer.cmake index 8bb88685be4..0a609d91b1d 100644 --- a/cmake/developer_package/compile_flags/sanitizer.cmake +++ b/cmake/developer_package/compile_flags/sanitizer.cmake @@ -33,7 +33,7 @@ if (ENABLE_UB_SANITIZER) # https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17 # Mute -fsanitize=function Indirect call of a function through a function pointer of the wrong type. # Sample cases: - # call to function GetAPIVersion through pointer to incorrect function type 'void *(*)()' + # call to function get_api_version through pointer to incorrect function type 'void *(*)()' # Mute -fsanitize=alignment Use of a misaligned pointer or creation of a misaligned reference. Also sanitizes assume_aligned-like attributes. 
# Sample cases: # VPU_FixedMaxHeapTest.DefaultConstructor test case load of misaligned address 0x62000000187f for type 'const DataType', which requires 4 byte alignment diff --git a/cmake/developer_package/frontends/create_frontends_hpp.cmake b/cmake/developer_package/frontends/create_frontends_hpp.cmake index eeadad18650..06c7ad9f3c4 100644 --- a/cmake/developer_package/frontends/create_frontends_hpp.cmake +++ b/cmake/developer_package/frontends/create_frontends_hpp.cmake @@ -15,8 +15,8 @@ set(OV_FRONTEND_MAP_DEFINITION " FrontendsStaticRegistry registry = {") foreach(frontend IN LISTS FRONTEND_NAMES) # common - set(_OV_FRONTEND_DATA_FUNC "GetFrontEndData${frontend}") - set(_OV_VERSION_FUNC "GetAPIVersion${frontend}") + set(_OV_FRONTEND_DATA_FUNC "get_front_end_data_${frontend}") + set(_OV_VERSION_FUNC "get_api_version_${frontend}") # declarations set(OV_FRONTEND_DECLARATIONS "${OV_FRONTEND_DECLARATIONS} diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index a5393c59d00..f881d0bed17 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -190,21 +190,8 @@ macro(ov_add_frontend) if(NOT BUILD_SHARED_LIBS) # override default function names target_compile_definitions(${TARGET_NAME} PRIVATE - "-DGetFrontEndData=GetFrontEndData${OV_FRONTEND_NAME}" - "-DGetAPIVersion=GetAPIVersion${OV_FRONTEND_NAME}") - endif() - - # enable LTO - set_target_properties(${TARGET_NAME} PROPERTIES - INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) - - if(OV_FRONTEND_SKIP_NCC_STYLE) - # frontend's CMakeLists.txt must define its own custom 'ov_ncc_naming_style' step - else() - ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} - SOURCE_DIRECTORY "${frontend_root_dir}/include" - ADDITIONAL_INCLUDE_DIRECTORIES - $) + "-Dget_front_end_data=get_front_end_data_${OV_FRONTEND_NAME}" + "-Dget_api_version=get_api_version_${OV_FRONTEND_NAME}") endif() 
target_include_directories(${TARGET_NAME} @@ -255,6 +242,21 @@ macro(ov_add_frontend) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS} ${proto_files} ${flatbuffers_schema_files}) + # enable LTO + set_target_properties(${TARGET_NAME} PROPERTIES + INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) + + if(OV_FRONTEND_SKIP_NCC_STYLE) + # frontend's CMakeLists.txt must define its own custom 'ov_ncc_naming_style' step + else() + ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} + SOURCE_DIRECTORIES "${frontend_root_dir}/include" + "${frontend_root_dir}/src" + ADDITIONAL_INCLUDE_DIRECTORIES + $ + $) + endif() + add_dependencies(ov_frontends ${TARGET_NAME}) # must be called after all target_link_libraries diff --git a/cmake/developer_package/frontends/ov_frontends.hpp.in b/cmake/developer_package/frontends/ov_frontends.hpp.in index 6a07e859b9d..dba0993e9a1 100644 --- a/cmake/developer_package/frontends/ov_frontends.hpp.in +++ b/cmake/developer_package/frontends/ov_frontends.hpp.in @@ -10,12 +10,12 @@ namespace { -using GetFrontEndDataFunc = void*(); -using GetAPIVersionFunc = ov::frontend::FrontEndVersion(); +using get_front_end_data_func = void*(); +using get_api_version_func = ov::frontend::FrontEndVersion(); struct Value { - GetFrontEndDataFunc* m_dataFunc; - GetAPIVersionFunc* m_versionFunc; + get_front_end_data_func* m_dataFunc; + get_api_version_func* m_versionFunc; }; using FrontendsStaticRegistry = std::vector; diff --git a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake index 544aee979e6..e463cb7b347 100644 --- a/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake +++ b/cmake/developer_package/ncc_naming_style/ncc_naming_style.cmake @@ -112,13 +112,13 @@ endif() # # ov_ncc_naming_style(FOR_TARGET target_name -# SOURCE_DIRECTORY dir +# [SOURCE_DIRECTORIES dir1 dir2 ...] 
# [STYLE_FILE style_file.style] # [ADDITIONAL_INCLUDE_DIRECTORIES dir1 dir2 ..] # [DEFINITIONS def1 def2 ..]) # # FOR_TARGET - name of the target -# SOURCE_DIRECTORY - directory to check sources from +# SOURCE_DIRECTORIES - directory to check sources from # STYLE_FILE - path to the specific style file # ADDITIONAL_INCLUDE_DIRECTORIES - additional include directories used in checked headers # DEFINITIONS - additional definitions passed to preprocessor stage @@ -129,9 +129,9 @@ function(ov_ncc_naming_style) endif() cmake_parse_arguments(NCC_STYLE "FAIL" - "FOR_TARGET;SOURCE_DIRECTORY;STYLE_FILE" "ADDITIONAL_INCLUDE_DIRECTORIES;DEFINITIONS" ${ARGN}) + "FOR_TARGET;STYLE_FILE" "SOURCE_DIRECTORIES;ADDITIONAL_INCLUDE_DIRECTORIES;DEFINITIONS" ${ARGN}) - foreach(var FOR_TARGET SOURCE_DIRECTORY) + foreach(var FOR_TARGET SOURCE_DIRECTORIES) if(NOT DEFINED NCC_STYLE_${var}) message(FATAL_ERROR "${var} is not defined in ov_ncc_naming_style function") endif() @@ -141,18 +141,18 @@ function(ov_ncc_naming_style) set(NCC_STYLE_STYLE_FILE ${ncc_style_dir}/openvino.style) endif() - file(GLOB_RECURSE sources - RELATIVE "${NCC_STYLE_SOURCE_DIRECTORY}" - "${NCC_STYLE_SOURCE_DIRECTORY}/*.hpp" - "${NCC_STYLE_SOURCE_DIRECTORY}/*.cpp") + foreach(source_dir IN LISTS NCC_STYLE_SOURCE_DIRECTORIES) + file(GLOB_RECURSE local_sources "${source_dir}/*.hpp" "${source_dir}/*.cpp") + list(APPEND sources ${local_sources}) + endforeach() - list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_SOURCE_DIRECTORY}") - # without it sources with same name from different directories will map to same .ncc_style target - file(RELATIVE_PATH source_dir_rel ${CMAKE_SOURCE_DIR} ${NCC_STYLE_SOURCE_DIRECTORY}) + list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES ${NCC_STYLE_SOURCE_DIRECTORIES}) - foreach(source IN LISTS sources) - set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source}.ncc_style") - set(full_source_path "${NCC_STYLE_SOURCE_DIRECTORY}/${source}") + foreach(source_file IN LISTS 
sources) + get_filename_component(source_dir "${source_file}" DIRECTORY) + file(RELATIVE_PATH source_dir_rel "${CMAKE_SOURCE_DIR}" "${source_dir}") + get_filename_component(source_name "${source_file}" NAME) + set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source_name}.ncc_style") add_custom_command( OUTPUT @@ -161,7 +161,7 @@ function(ov_ncc_naming_style) "${CMAKE_COMMAND}" -D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}" -D "NCC_PY_SCRIPT=${ncc_script_py}" - -D "INPUT_FILE=${full_source_path}" + -D "INPUT_FILE=${source_file}" -D "OUTPUT_FILE=${output_file}" -D "DEFINITIONS=${NCC_STYLE_DEFINITIONS}" -D "CLANG_LIB_PATH=${libclang_location}" @@ -170,12 +170,12 @@ function(ov_ncc_naming_style) -D "EXPECTED_FAIL=${NCC_STYLE_FAIL}" -P "${ncc_style_dir}/ncc_run.cmake" DEPENDS - "${full_source_path}" + "${source_file}" "${ncc_style_dir}/openvino.style" "${ncc_script_py}" "${ncc_style_dir}/ncc_run.cmake" COMMENT - "[ncc naming style] ${source}" + "[ncc naming style] ${source_dir_rel}/${source_name}" VERBATIM) list(APPEND output_files ${output_file}) endforeach() @@ -191,6 +191,6 @@ endfunction() if(TARGET ncc_all) ov_ncc_naming_style(FOR_TARGET ncc_all - SOURCE_DIRECTORY "${ncc_style_dir}/self_check" + SOURCE_DIRECTORIES "${ncc_style_dir}/self_check" FAIL) endif() diff --git a/cmake/developer_package/ncc_naming_style/openvino.style b/cmake/developer_package/ncc_naming_style/openvino.style index 68eecd01be0..c264122d881 100644 --- a/cmake/developer_package/ncc_naming_style/openvino.style +++ b/cmake/developer_package/ncc_naming_style/openvino.style @@ -1,7 +1,7 @@ # custom OpenVINO values CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN)$' ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$' -StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair)$' +StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair|stat)$' FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$' Namespace: '^([a-z\d_]*|InferenceEngine)$' 
NamespaceAlias: '^([a-z\d_]+|InferenceEngine)$' @@ -27,7 +27,7 @@ CxxDynamicCastExpression: '^.*$' # not needed values ClassTemplatePartialSpecialization: '^.*$' ConversionFunction: '^.*$' -UsingDirective: 'XXXX' +UsingDirective: '^.*$' ClassAccessSpecifier: '^.*$' # looks like can be fixed TypeReference: '^.*$' # looks like can be fixed CxxBaseSpecifier: '^.*$' # looks like can be fixed diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index 4c0a58d8da7..425e7475038 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -83,7 +83,7 @@ if(OpenCV_FOUND) endif() # ov_ncc_naming_style(FOR_TARGET "${TARGET_NAME}" -# SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" +# SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}" # ADDITIONAL_INCLUDE_DIRECTORIES # $) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 565a1737c65..d2a65618071 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -236,7 +236,7 @@ macro(ie_add_sample) endif() if(COMMAND ov_ncc_naming_style AND NOT c_sample) ov_ncc_naming_style(FOR_TARGET "${IE_SAMPLE_NAME}" - SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") + SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}") endif() endmacro() diff --git a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp index fc3aa677189..1c8da7cca4e 100644 --- a/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp +++ b/src/bindings/python/tests/mock/mock_py_frontend/src/mock_py_frontend.cpp @@ -428,14 +428,14 @@ void FrontEndMockPy::clear_stat() { } // namespace frontend } // namespace ov -MOCK_C_API ov::frontend::FrontEndVersion GetAPIVersion(); -MOCK_C_API void* GetFrontEndData(); +MOCK_C_API ov::frontend::FrontEndVersion get_api_version(); +MOCK_C_API void* get_front_end_data(); -MOCK_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +MOCK_C_API 
ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -MOCK_C_API void* GetFrontEndData() { +MOCK_C_API void* get_front_end_data() { ov::frontend::FrontEndPluginInfo* res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "mock_py"; res->m_creator = []() { diff --git a/src/common/util/CMakeLists.txt b/src/common/util/CMakeLists.txt index ffd0b982623..a589c283390 100644 --- a/src/common/util/CMakeLists.txt +++ b/src/common/util/CMakeLists.txt @@ -43,6 +43,6 @@ ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} - SOURCE_DIRECTORY ${UTIL_INCLUDE_DIR}) + SOURCE_DIRECTORIES ${UTIL_INCLUDE_DIR}) openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME}) diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index c25f61dfd8c..3beef482255 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -87,7 +87,7 @@ target_link_libraries(ngraph_obj PRIVATE ngraph::builder ngraph::reference openv ie_mark_target_as_cc(ngraph_obj) ov_ncc_naming_style(FOR_TARGET ngraph_obj - SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include") + SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include") add_clang_format_target(ngraph_clang FOR_TARGETS ngraph_obj) diff --git a/src/core/tests/frontend/mock_frontend.cpp b/src/core/tests/frontend/mock_frontend.cpp index 4002b7594d5..257c464af36 100644 --- a/src/core/tests/frontend/mock_frontend.cpp +++ b/src/core/tests/frontend/mock_frontend.cpp @@ -193,14 +193,14 @@ public: } }; -MOCK_C_API FrontEndVersion GetAPIVersion(); -MOCK_C_API void* GetFrontEndData(); +MOCK_C_API FrontEndVersion get_api_version(); +MOCK_C_API void* get_front_end_data(); -MOCK_C_API FrontEndVersion GetAPIVersion() { +MOCK_C_API FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -MOCK_C_API void* GetFrontEndData() { +MOCK_C_API void* get_front_end_data() { auto* 
res = new FrontEndPluginInfo(); res->m_name = "mock1"; res->m_creator = []() { diff --git a/src/frontends/common/CMakeLists.txt b/src/frontends/common/CMakeLists.txt index f57b23661fb..e3ead9fdcdf 100644 --- a/src/frontends/common/CMakeLists.txt +++ b/src/frontends/common/CMakeLists.txt @@ -57,7 +57,7 @@ target_compile_definitions(${TARGET_NAME}_obj PRIVATE add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}_obj) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_obj - SOURCE_DIRECTORY "${FRONTEND_INCLUDE_DIR}" + SOURCE_DIRECTORIES "${FRONTEND_INCLUDE_DIR}" ADDITIONAL_INCLUDE_DIRECTORIES $) diff --git a/src/frontends/common/include/openvino/frontend/manager.hpp b/src/frontends/common/include/openvino/frontend/manager.hpp index 4968ef8bbf6..837e5eb8d6d 100644 --- a/src/frontends/common/include/openvino/frontend/manager.hpp +++ b/src/frontends/common/include/openvino/frontend/manager.hpp @@ -97,13 +97,13 @@ FRONTEND_API FrontEnd::Ptr FrontEndManager::load_by_model(const std::vector(ov::util::get_symbol(so, "GetAPIVersion")); + auto info_addr = reinterpret_cast(ov::util::get_symbol(so, "get_api_version")); if (!info_addr) { OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have API version" << std::endl; return false; @@ -161,7 +161,7 @@ bool PluginInfo::load_internal() { return false; } - auto creator_addr = reinterpret_cast(ov::util::get_symbol(so, "GetFrontEndData")); + auto creator_addr = reinterpret_cast(ov::util::get_symbol(so, "get_front_end_data")); if (!creator_addr) { OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have Frontend Data" << std::endl; return false; diff --git a/src/frontends/ir/src/frontend.cpp b/src/frontends/ir/src/frontend.cpp index 2c8d5f574f8..27f5af781d0 100644 --- a/src/frontends/ir/src/frontend.cpp +++ b/src/frontends/ir/src/frontend.cpp @@ -23,8 +23,8 @@ namespace frontend { namespace ir { namespace { -inline size_t GetIRVersion(pugi::xml_node& root) { - return 
XMLParseUtils::GetUIntAttr(root, "version", 0); +inline size_t get_ir_version(pugi::xml_node& root) { + return pugixml::utils::GetUIntAttr(root, "version", 0); } /** @@ -32,7 +32,7 @@ inline size_t GetIRVersion(pugi::xml_node& root) { * @param model Models stream * @return IR version, 0 if model does represent IR */ -size_t GetIRVersion(std::istream& model) { +size_t get_ir_version(std::istream& model) { std::array header{}; model.seekg(0, model.beg); @@ -51,7 +51,7 @@ size_t GetIRVersion(std::istream& model) { std::transform(node_name.begin(), node_name.end(), node_name.begin(), ::tolower); if (node_name == "net") { - return GetIRVersion(root); + return get_ir_version(root); } } @@ -89,9 +89,9 @@ bool FrontEnd::supported_impl(const std::vector& variants) const { size_t version; if (provided_model_stream) { - version = GetIRVersion(*provided_model_stream); + version = get_ir_version(*provided_model_stream); } else if (local_model_stream.is_open()) { - version = GetIRVersion(local_model_stream); + version = get_ir_version(local_model_stream); local_model_stream.close(); } else { return false; @@ -193,7 +193,7 @@ InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const #else weights_path += ".bin"; #endif - if (!FileUtils::fileExist(weights_path)) { + if (!ov::util::file_exists(weights_path)) { weights_path.clear(); } } @@ -238,11 +238,11 @@ std::string FrontEnd::get_name() const { } // namespace frontend } // namespace ov -IR_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +IR_C_API ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -IR_C_API void* GetFrontEndData() { +IR_C_API void* get_front_end_data() { frontend::FrontEndPluginInfo* res = new frontend::FrontEndPluginInfo(); res->m_name = "ir"; res->m_creator = []() { diff --git a/src/frontends/ir/src/input_model.cpp b/src/frontends/ir/src/input_model.cpp index 9e54869ceeb..299dcdfb508 100644 --- a/src/frontends/ir/src/input_model.cpp +++ 
b/src/frontends/ir/src/input_model.cpp @@ -18,9 +18,9 @@ using namespace ngraph; using namespace InferenceEngine; namespace { -void ParsePreProcess(pugi::xml_node& root, - std::shared_ptr weights, - std::shared_ptr f) { +void parse_pre_process(pugi::xml_node& root, + std::shared_ptr weights, + std::shared_ptr f) { /* Preprocessing block can have two preprocessing types: * * @@ -46,7 +46,7 @@ void ParsePreProcess(pugi::xml_node& root, std::string inputName; std::shared_ptr input_node; - inputName = XMLParseUtils::GetStrAttr(ppNode, "reference-layer-name", ""); + inputName = pugixml::utils::GetStrAttr(ppNode, "reference-layer-name", ""); inputName = trim(inputName); if (inputName.empty()) { @@ -110,7 +110,7 @@ void ParsePreProcess(pugi::xml_node& root, auto input_type = input_node->get_output_element_type(0); FOREACH_CHILD (chan, ppNode, "channel") { - auto chanNo = XMLParseUtils::GetUInt64Attr(chan, "id", next_channel_id++); + auto chanNo = pugixml::utils::GetUInt64Attr(chan, "id", next_channel_id++); auto meanNode = chan.child("mean"); if (!meanNode.empty()) { @@ -118,11 +118,11 @@ void ParsePreProcess(pugi::xml_node& root, IE_THROW() << "mean should have at least one of the following attribute: value, size"; } if (meanNode.attribute("value")) { - mean_scalar_values.insert({chanNo, XMLParseUtils::GetFloatAttr(meanNode, "value")}); + mean_scalar_values.insert({chanNo, pugixml::utils::GetFloatAttr(meanNode, "value")}); } if (meanNode.attribute("size") && meanNode.attribute("offset")) { - auto const_size = XMLParseUtils::GetUInt64Attr(meanNode, "size"); - auto const_offset = XMLParseUtils::GetUInt64Attr(meanNode, "offset"); + auto const_size = pugixml::utils::GetUInt64Attr(meanNode, "size"); + auto const_offset = pugixml::utils::GetUInt64Attr(meanNode, "offset"); if (shape_size(mean_shape) * input_type.size() != const_size) { IE_THROW() << "mean blob size mismatch expected input, got: " << const_size << " expecting " << mean_shape << " x " << input_type.size(); @@ 
-229,12 +229,12 @@ std::shared_ptr InputModel::InputModelIRImpl::convert() { std::unordered_map> variables; // Load default opsets - size_t version = XMLParseUtils::GetUIntAttr(m_root, "version", 0); + size_t version = pugixml::utils::GetUIntAttr(m_root, "version", 0); ov::XmlDeserializer visitor(m_root, m_weights, m_opsets, m_extensions, variables, version); std::shared_ptr function; visitor.on_attribute("net", function); function->get_rt_info()["version"] = int64_t(version); - ParsePreProcess(m_root, m_weights, function); + parse_pre_process(m_root, m_weights, function); return function; } diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 44b57d422e4..72eaeb9b07a 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -28,20 +28,20 @@ XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& nod auto extend_io_map = io_map; FOREACH_CHILD (layer, body_node.child("layers"), "layer") { - auto type = XMLParseUtils::GetStrAttr(layer, "type"); + auto type = pugixml::utils::GetStrAttr(layer, "type"); if (type == "Parameter") { - auto id = XMLParseUtils::GetUIntAttr(layer, "id"); + auto id = pugixml::utils::GetUIntAttr(layer, "id"); extend_io_map.inputs.insert({id, -1}); // try add as unconnected } else if (type == "Result") { - auto id = XMLParseUtils::GetUIntAttr(layer, "id"); + auto id = pugixml::utils::GetUIntAttr(layer, "id"); extend_io_map.outputs.insert({id, -1}); // try add as unconnected } } return extend_io_map; } -std::vector> XmlDeserializer::parseInputDescription( +std::vector> XmlDeserializer::parse_input_description( const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name) { @@ -53,23 +53,23 @@ std::vector> Xml // Parse PortMap: external_port_id for inputs does not always appear in consecutive order std::map input_map; FOREACH_CHILD (input, node.child(port_map_name.c_str()), "input") { - int64_t 
ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); + int64_t ext_port_id = pugixml::utils::GetInt64Attr(input, "external_port_id"); input_map.emplace(ext_port_id, input); } for (const auto& input : input_map) { auto& xml_input = input.second; auto axis_attr = xml_input.attribute("axis"); - int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id"); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); + int64_t ti_input_index = pugixml::utils::GetInt64Attr(xml_input, "external_port_id"); + size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_input, "internal_layer_id"); // if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput. if (!axis_attr.empty()) { - size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis"); - int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0); - int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1); - int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1); - int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1); + size_t axis = pugixml::utils::GetUIntAttr(xml_input, "axis"); + int64_t start = pugixml::utils::GetInt64Attr(xml_input, "start", 0); + int64_t stride = pugixml::utils::GetInt64Attr(xml_input, "stride", 1); + int64_t end = pugixml::utils::GetInt64Attr(xml_input, "end", -1); + int64_t part_size = pugixml::utils::GetInt64Attr(xml_input, "part_size", 1); const auto input_index = up_io_map.inputs.at(body_parameter_index); @@ -84,10 +84,10 @@ std::vector> Xml // otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput bool is_back_edge_exist = false; FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") { - size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer"); + size_t to_layer = pugixml::utils::GetUIntAttr(xml_edge, "to-layer"); if (to_layer == body_parameter_index) { - size_t from_layer = 
XMLParseUtils::GetUIntAttr(xml_edge, "from-layer"); + size_t from_layer = pugixml::utils::GetUIntAttr(xml_edge, "from-layer"); const auto input_index = up_io_map.inputs.at(body_parameter_index); const auto output_index = up_io_map.outputs.at(from_layer); @@ -117,9 +117,9 @@ std::vector> Xml } std::vector> -XmlDeserializer::parseOutputDescription(const pugi::xml_node& node, - const std::string& body_name, - const std::string& port_map_name) { +XmlDeserializer::parse_output_description(const pugi::xml_node& node, + const std::string& body_name, + const std::string& port_map_name) { std::vector> outputs; auto body_node = node.child(body_name.c_str()); const auto up_io_map = updated_io_map(node, body_node); @@ -127,7 +127,7 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node, // Parse PortMap: outputs std::map output_map; FOREACH_CHILD (output, node.child(port_map_name.c_str()), "output") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); + int64_t ext_port_id = pugixml::utils::GetInt64Attr(output, "external_port_id"); output_map.emplace(ext_port_id, output); } @@ -135,19 +135,19 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node, for (const auto& output : output_map) { auto& xml_output = output.second; auto axis_attr = xml_output.attribute("axis"); - size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); + size_t body_result_index = pugixml::utils::GetUIntAttr(xml_output, "internal_layer_id"); // if external_port_id < 0 it means that this body result isn't connected to the Loop output // and is used only for internal needs. For TensorIterator external_port_id is always > 0. - if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) { + if (pugixml::utils::GetInt64Attr(xml_output, "external_port_id") >= 0) { // if axis is set, then concatenation is enabled. Create // ngraph::TensorIterator::ConcatOutput. 
if (!axis_attr.empty()) { - int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis"); - int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0); - int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1); - int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1); - int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1); + int64_t axis = pugixml::utils::GetInt64Attr(xml_output, "axis"); + int64_t start = pugixml::utils::GetInt64Attr(xml_output, "start", 0); + int64_t stride = pugixml::utils::GetInt64Attr(xml_output, "stride", 1); + int64_t end = pugixml::utils::GetInt64Attr(xml_output, "end", -1); + int64_t part_size = pugixml::utils::GetInt64Attr(xml_output, "part_size", 1); const auto output_index = up_io_map.outputs.at(body_result_index); @@ -174,7 +174,7 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node, return outputs; } -ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) { +ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parse_purpose_attribute(const pugi::xml_node& node) { ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1}; auto body_node = node.child("body"); const auto up_io_map = updated_io_map(node, body_node); @@ -186,19 +186,19 @@ ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(co // order std::map input_map; FOREACH_CHILD (input, node.child("port_map"), "input") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); + int64_t ext_port_id = pugixml::utils::GetInt64Attr(input, "external_port_id"); input_map.emplace(ext_port_id, input); } std::map output_map; FOREACH_CHILD (output, node.child("port_map"), "output") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); + int64_t ext_port_id = pugixml::utils::GetInt64Attr(output, "external_port_id"); output_map.emplace(ext_port_id, output); } for (const auto& input : 
input_map) { auto& xml_input = input.second; - auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", ""); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); + auto purpose = pugixml::utils::GetStrAttr(xml_input, "purpose", ""); + size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_input, "internal_layer_id"); if (purpose == "current_iteration") { result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index); } @@ -206,8 +206,8 @@ ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(co for (const auto& output : output_map) { auto& xml_output = output.second; - auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", ""); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); + auto purpose = pugixml::utils::GetStrAttr(xml_output, "purpose", ""); + size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_output, "internal_layer_id"); if (purpose == "execution_condition") { result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index); } @@ -239,13 +239,13 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor< } if (auto a = ngraph::as_type>>>(&adapter)) { - a->set(parseInputDescription(m_node, body_name, port_map_name)); + a->set(parse_input_description(m_node, body_name, port_map_name)); } else if (auto a = ngraph::as_type>>>(&adapter)) { - a->set(parseOutputDescription(m_node, body_name, port_map_name)); + a->set(parse_output_description(m_node, body_name, port_map_name)); } else if (auto a = ngraph::as_type>(&adapter)) { - a->set(parsePurposeAttribute(m_node)); + a->set(parse_purpose_attribute(m_node)); } } @@ -318,7 +318,7 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor< &adapter)) { std::string value; pugi::xml_node dn = m_node.child("data"); - auto type = XMLParseUtils::GetStrAttr(m_node, "type"); + auto type = 
pugixml::utils::GetStrAttr(m_node, "type"); if (dn.empty()) IE_THROW() << "No attrtibutes defined for " << type << " op!"; @@ -332,8 +332,8 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor< std::vector shape; std::string el_type_str; - size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset"); - size_t size = XMLParseUtils::GetUInt64Attr(dn, "size"); + size_t offset = pugixml::utils::GetUInt64Attr(dn, "offset"); + size_t size = pugixml::utils::GetUInt64Attr(dn, "size"); if (!getStrAttribute(dn, "element_type", el_type_str)) return; if (!getParameters(dn, "shape", shape)) @@ -357,8 +357,8 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor< a->set(buffer); } } else if (auto a = ngraph::as_type>(&adapter)) { - const auto& type = XMLParseUtils::GetStrAttr(m_node, "type"); - const auto& version = XMLParseUtils::GetStrAttr(m_node, "version"); + const auto& type = pugixml::utils::GetStrAttr(m_node, "type"); + const auto& version = pugixml::utils::GetStrAttr(m_node, "version"); ngraph::op::FrameworkNodeAttrs node_attrs; node_attrs.set_opset_name(version); @@ -414,25 +414,25 @@ std::shared_ptr XmlDeserializer::parse_function( ngraph::SinkVector sinks; }; - struct edge { + struct Edge { size_t fromLayerId, fromPortId, toPortId; }; - struct node_params { + struct NodeParams { pugi::xml_node xml; GenericLayerParams params; }; - std::map params; + std::map params; std::vector outputs; std::unordered_set opName; std::vector order; std::set dfs_used_nodes; - std::map> edges; + std::map> edges; // Read all layers and store their parameters in params map FOREACH_CHILD (node, root.child("layers"), "layer") { - auto node_param = parseGenericParams(node); + auto node_param = parse_generic_params(node); if (opName.find(node_param.name) != opName.end() && node_param.type != "Result") IE_THROW() << "Invalid IR! 
" << node_param.name << " name is not unique!"; opName.insert(node_param.name); @@ -451,10 +451,10 @@ std::shared_ptr XmlDeserializer::parse_function( // Read all edges and store them for further usage FOREACH_CHILD (_ec, root.child("edges"), "edge") { - size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer"); - size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port"); - size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer"); - size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port"); + size_t fromLayer = pugixml::utils::GetUIntAttr(_ec, "from-layer"); + size_t fromPort = pugixml::utils::GetUIntAttr(_ec, "from-port"); + size_t toLayer = pugixml::utils::GetUIntAttr(_ec, "to-layer"); + size_t toPort = pugixml::utils::GetUIntAttr(_ec, "to-port"); edges[toLayer].push_back({fromLayer, fromPort, toPort}); } @@ -489,14 +489,14 @@ std::shared_ptr XmlDeserializer::parse_function( IE_THROW() << "Attempt to access node " << e.fromLayerId << " that not in graph."; } auto& p_output = params[e.fromLayerId].params; - size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId); + size_t const realInputPortId = p.params.get_real_input_port_id(e.toPortId); if (realInputPortId >= inputs.size()) IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId << " is inconsistent!"; - inputs[realInputPortId] = input_node->output(p_output.getRealOutputPortId(e.fromPortId)); + inputs[realInputPortId] = input_node->output(p_output.get_real_output_port_id(e.fromPortId)); } - auto node = createNode(inputs, p.xml, weights, p.params); + auto node = create_node(inputs, p.xml, weights, p.params); id_to_node[layer_id] = node; // Check that output shape after OpenVINO node validation the same as in IR @@ -537,7 +537,7 @@ std::shared_ptr XmlDeserializer::parse_function( auto function = std::make_shared(func_nodes.results, func_nodes.sinks, func_nodes.parameters, - XMLParseUtils::GetStrAttr(root, "name", "")); + 
pugixml::utils::GetStrAttr(root, "name", "")); for (const auto& sink : func_nodes.sinks) { if (const auto& assign = std::dynamic_pointer_cast(sink)) { assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id())); @@ -582,9 +582,9 @@ private: ov::Any parse_value(const pugi::xml_node& node) const { if (has_attr(node)) { - return XMLParseUtils::GetStrAttr(node, "value"); + return pugixml::utils::GetStrAttr(node, "value"); } else if (std::string(node.name()) == "unset" && has_attr(node, "unset_cli_parameters")) { - return XMLParseUtils::GetStrAttr(node, "unset_cli_parameters"); + return pugixml::utils::GetStrAttr(node, "unset_cli_parameters"); } else { return parse_node(node); } @@ -636,7 +636,7 @@ void XmlDeserializer::read_meta_data(const std::shared_ptr& model, co if (data.empty()) continue; if (!data.attribute("value").empty()) { - rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value"); + rt_info[data.name()] = pugixml::utils::GetStrAttr(data, "value"); } else { // Use meta data for set of parameters std::shared_ptr meta = std::make_shared(data.name(), data); @@ -660,7 +660,7 @@ void XmlDeserializer::read_legacy_meta_data(const std::shared_ptr& mo std::shared_ptr meta = std::make_shared("cli_parameters", data); rt_info["conversion_parameters"] = meta; } else if (!data.attribute("value").empty()) { - rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value"); + rt_info[data.name()] = pugixml::utils::GetStrAttr(data, "value"); } else { OPENVINO_THROW("Unsupported legacy argument: ", data.name()); } @@ -675,13 +675,13 @@ void XmlDeserializer::read_legacy_meta_data(const std::shared_ptr& mo read_meta(model, it, root_section.child(it.c_str())); } -GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) { +GenericLayerParams XmlDeserializer::parse_generic_params(const pugi::xml_node& node) { const auto parsePort = [](const pugi::xml_node& parentNode, const GenericLayerParams& params, bool input) -> 
GenericLayerParams::LayerPortData { GenericLayerParams::LayerPortData port; - port.portId = XMLParseUtils::GetUIntAttr(parentNode, "id"); + port.portId = pugixml::utils::GetUIntAttr(parentNode, "id"); FOREACH_CHILD (node, parentNode, "dim") { int64_t dim = 0; @@ -697,7 +697,7 @@ GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& nod ngraph::element::Type type(ngraph::element::Type_t::undefined); // Input port hasn't precision if (!input) { - const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision"); + const std::string& preStr = pugixml::utils::GetStrAttr(parentNode, "precision"); type = InferenceEngine::details::convertPrecision(preStr); } port.precision = type; @@ -721,12 +721,12 @@ GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& nod }; GenericLayerParams params; - params.layerId = XMLParseUtils::GetUIntAttr(node, "id"); - params.version = XMLParseUtils::GetStrAttr(node, "version"); + params.layerId = pugixml::utils::GetUIntAttr(node, "id"); + params.version = pugixml::utils::GetStrAttr(node, "version"); - params.type = XMLParseUtils::GetStrAttr(node, "type"); + params.type = pugixml::utils::GetStrAttr(node, "type"); - params.name = XMLParseUtils::GetStrAttr(node, "name"); + params.name = pugixml::utils::GetStrAttr(node, "name"); auto outNode = node.child("output"); if (!outNode.empty()) { @@ -753,7 +753,7 @@ static const std::string& translate_type_name(const std::string& name) { return name; } -std::shared_ptr XmlDeserializer::createNode( +std::shared_ptr XmlDeserializer::create_node( const std::vector>& inputs, const pugi::xml_node& node, const std::shared_ptr& weights, diff --git a/src/frontends/ir/src/ir_deserializer.hpp b/src/frontends/ir/src/ir_deserializer.hpp index f0fe3b0be8a..b9cd35701a7 100644 --- a/src/frontends/ir/src/ir_deserializer.hpp +++ b/src/frontends/ir/src/ir_deserializer.hpp @@ -35,7 +35,7 @@ struct GenericLayerParams { std::vector inputPorts; std::vector 
outputPorts; - size_t getRealInputPortId(size_t id) const { + size_t get_real_input_port_id(size_t id) const { size_t real_id = 0; for (auto& it : inputPorts) { if (it.portId == id) { @@ -46,7 +46,7 @@ struct GenericLayerParams { IE_THROW() << "Can not find input port with id " << id << " in layer " << name; } - size_t getRealOutputPortId(size_t id) const { + size_t get_real_output_port_id(size_t id) const { size_t real_id = 0; for (auto& it : outputPorts) { if (it.portId == id) { @@ -152,12 +152,14 @@ private: /// Shall be used only for ops which have port_map attribute. /// \param node xml op representation std::vector> - parseInputDescription(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name); + parse_input_description(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name); /// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs. /// Shall be used only for ops which have port_map attribute. /// \param node xml op representation - std::vector> - parseOutputDescription(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name); + std::vector> parse_output_description( + const pugi::xml_node& node, + const std::string& body_name, + const std::string& port_map_name); // TODO consider to call only once per layer/TI-Loop node IoMap updated_io_map(const pugi::xml_node& node, const pugi::xml_node& body_node); @@ -171,14 +173,14 @@ private: /// \brief Traverses xml node representation in order to get the purpose attribute of /// inputs/outputs in the body of Loop op. 
\param node xml node representation \return struct /// with value of purpuse attribute - ov::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node); + ov::op::v5::Loop::SpecialBodyPorts parse_purpose_attribute(const pugi::xml_node& node); - GenericLayerParams parseGenericParams(const pugi::xml_node& node); + GenericLayerParams parse_generic_params(const pugi::xml_node& node); - std::shared_ptr createNode(const ov::OutputVector& inputs, - const pugi::xml_node& node, - const std::shared_ptr& weights, - const GenericLayerParams& params); + std::shared_ptr create_node(const ov::OutputVector& inputs, + const pugi::xml_node& node, + const std::shared_ptr& weights, + const GenericLayerParams& params); void read_meta_data(const std::shared_ptr& model, const pugi::xml_node& meta_section); diff --git a/src/frontends/ir/src/os/win/win_mmap_object.cpp b/src/frontends/ir/src/os/win/win_mmap_object.cpp index 354c8afd9da..2a1bbd85c46 100644 --- a/src/frontends/ir/src/os/win/win_mmap_object.cpp +++ b/src/frontends/ir/src/os/win/win_mmap_object.cpp @@ -105,12 +105,12 @@ private: m_size); OPENVINO_ASSERT(m_data, "Can not create map view for ", path); } else { - m_data = NULL; + m_data = nullptr; } } private: - void* m_data = NULL; + void* m_data = nullptr; size_t m_size = 0; HandleHolder m_handle; HandleHolder m_mapping; diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt index 67b48262131..2edeaae86ec 100644 --- a/src/frontends/onnx/frontend/CMakeLists.txt +++ b/src/frontends/onnx/frontend/CMakeLists.txt @@ -19,7 +19,7 @@ set(ONNX_OPSET_VERSION 17 CACHE INTERNAL "Supported version of ONNX operator set target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} - SOURCE_DIRECTORY "${${TARGET_NAME}_INCLUDE_DIR}" + SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}" DEFINITIONS $ ADDITIONAL_INCLUDE_DIRECTORIES diff --git 
a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index d1a4d14ddb7..056fa8dfd8c 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -25,11 +25,11 @@ using namespace ov; using namespace ov::frontend::onnx; -ONNX_FRONTEND_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +ONNX_FRONTEND_C_API ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -ONNX_FRONTEND_C_API void* GetFrontEndData() { +ONNX_FRONTEND_C_API void* get_front_end_data() { ov::frontend::FrontEndPluginInfo* res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "onnx"; res->m_creator = []() { diff --git a/src/frontends/paddle/src/decoder_proto.cpp b/src/frontends/paddle/src/decoder_proto.cpp index 80c14bad144..f286bfcf1f8 100644 --- a/src/frontends/paddle/src/decoder_proto.cpp +++ b/src/frontends/paddle/src/decoder_proto.cpp @@ -21,17 +21,23 @@ namespace paddle { using namespace ::paddle::framework; -std::map TYPE_MAP{ - {proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean}, - {proto::VarType_Type::VarType_Type_INT16, ov::element::i16}, - {proto::VarType_Type::VarType_Type_INT32, ov::element::i32}, - {proto::VarType_Type::VarType_Type_INT64, ov::element::i64}, - {proto::VarType_Type::VarType_Type_FP16, ov::element::f16}, - {proto::VarType_Type::VarType_Type_FP32, ov::element::f32}, - {proto::VarType_Type::VarType_Type_FP64, ov::element::f64}, - {proto::VarType_Type::VarType_Type_UINT8, ov::element::u8}, - {proto::VarType_Type::VarType_Type_INT8, ov::element::i8}, - {proto::VarType_Type::VarType_Type_BF16, ov::element::bf16}}; +ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type) { + static const std::map type_map{ + {proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean}, + {proto::VarType_Type::VarType_Type_INT16, ov::element::i16}, + {proto::VarType_Type::VarType_Type_INT32, ov::element::i32}, + 
{proto::VarType_Type::VarType_Type_INT64, ov::element::i64}, + {proto::VarType_Type::VarType_Type_FP16, ov::element::f16}, + {proto::VarType_Type::VarType_Type_FP32, ov::element::f32}, + {proto::VarType_Type::VarType_Type_FP64, ov::element::f64}, + {proto::VarType_Type::VarType_Type_UINT8, ov::element::u8}, + {proto::VarType_Type::VarType_Type_INT8, ov::element::i8}, + {proto::VarType_Type::VarType_Type_BF16, ov::element::bf16}}; + + auto it = type_map.find(type); + OPENVINO_ASSERT(it != type_map.end(), "Cannot convert PDPD type to ov::element::Type"); + return it->second; +} ov::Any DecoderProto::get_attribute(const std::string& name) const { auto attrs = decode_attribute_helper(name); @@ -71,12 +77,12 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { ov::Any DecoderProto::convert_attribute(const Any& data, const std::type_info& type_info) const { if (data.is() && type_info == typeid(ov::element::Type)) { - return TYPE_MAP.at(static_cast(data.as())); + return get_ov_type(static_cast(data.as())); } else if (data.is>() && type_info == typeid(std::vector)) { const auto& casted = data.as>(); std::vector types(casted.size()); for (size_t i = 0; i < casted.size(); ++i) { - types[i] = TYPE_MAP.at(static_cast(casted[i])); + types[i] = get_ov_type(static_cast(casted[i])); } return types; } diff --git a/src/frontends/paddle/src/decoder_proto.hpp b/src/frontends/paddle/src/decoder_proto.hpp index f78708ed51a..11627c6fba6 100644 --- a/src/frontends/paddle/src/decoder_proto.hpp +++ b/src/frontends/paddle/src/decoder_proto.hpp @@ -22,7 +22,8 @@ namespace ov { namespace frontend { namespace paddle { -extern std::map<::paddle::framework::proto::VarType_Type, ov::element::Type> TYPE_MAP; + +ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type); class DecoderProto : public paddle::DecoderBase { public: diff --git a/src/frontends/paddle/src/frontend.cpp b/src/frontends/paddle/src/frontend.cpp index ecfe370f9b2..831bf33f583 100644 
--- a/src/frontends/paddle/src/frontend.cpp +++ b/src/frontends/paddle/src/frontend.cpp @@ -283,7 +283,7 @@ std::map> FrontEnd::convert_each_node_recurs const auto& var_name = port.arguments()[static_cast(idx)]; ng_outputs[idx].get_tensor().set_names({var_name}); // if nodes_dict already has node mapped to this tensor name it - // usually means that it was overwritten using setTensorValue + // usually means that it was overwritten using set_tensor_value nodes_dict[var_name] = ng_outputs[idx]; } } @@ -534,11 +534,11 @@ void FrontEnd::add_extension(const std::shared_ptr& extension) { } // namespace frontend } // namespace ov -PADDLE_C_API FrontEndVersion GetAPIVersion() { +PADDLE_C_API FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -PADDLE_C_API void* GetFrontEndData() { +PADDLE_C_API void* get_front_end_data() { FrontEndPluginInfo* res = new FrontEndPluginInfo(); res->m_name = "paddle"; res->m_creator = []() { diff --git a/src/frontends/paddle/src/input_model.cpp b/src/frontends/paddle/src/input_model.cpp index 9cd0d647021..a9f4ae1d60c 100644 --- a/src/frontends/paddle/src/input_model.cpp +++ b/src/frontends/paddle/src/input_model.cpp @@ -36,17 +36,17 @@ public: InputModelImpl(const std::vector& streams, const InputModel& input_model, const std::shared_ptr& telemetry); - std::vector getInputs() const; - std::vector getOutputs() const; - Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; - void overrideAllOutputs(const std::vector& outputs); - void overrideAllInputs(const std::vector& inputs); - void extractSubgraph(const std::vector& inputs, const std::vector& outputs); - void setDefaultShape(Place::Ptr place, const ov::Shape&); - void setPartialShape(Place::Ptr place, const ov::PartialShape&); - ov::PartialShape getPartialShape(Place::Ptr place) const; - void setElementType(Place::Ptr place, const ov::element::Type&); - void setTensorValue(Place::Ptr place, const void* value); + std::vector get_inputs() const; + std::vector 
get_outputs() const; + Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const; + void override_all_outputs(const std::vector& outputs); + void override_all_inputs(const std::vector& inputs); + void extract_subgraph(const std::vector& inputs, const std::vector& outputs); + void set_default_shape(Place::Ptr place, const ov::Shape&); + void set_partial_shape(Place::Ptr place, const ov::PartialShape&); + ov::PartialShape get_partial_shape(Place::Ptr place) const; + void set_element_type(Place::Ptr place, const ov::element::Type&); + void set_tensor_value(Place::Ptr place, const void* value); std::vector> get_op_places(const int32_t blck_idx) const; std::map> get_var_places() const { @@ -57,10 +57,10 @@ public: }; private: - void loadPlaces(); + void load_places(); template void loadConsts(const std::basic_string& folder_with_weights, std::istream* weight_stream); - void createTempConsts(); + void create_temp_consts(); std::vector> determine_cut_nodes() const; std::vector>> m_op_places; @@ -77,7 +77,7 @@ private: bool m_graph_changed = false; }; -void InputModel::InputModelImpl::loadPlaces() { +void InputModel::InputModelImpl::load_places() { const int cnt_of_blocks = m_fw_ptr->blocks_size(); const auto& blocks = m_fw_ptr->blocks(); std::map op_statistics; @@ -138,7 +138,7 @@ void InputModel::InputModelImpl::loadPlaces() { const auto& tensor_desc = var_place->get_desc().type().lod_tensor().tensor(); const auto& dims = tensor_desc.dims(); - var_place->set_element_type(TYPE_MAP[tensor_desc.data_type()]); + var_place->set_element_type(get_ov_type(tensor_desc.data_type())); var_place->set_partial_shape(PartialShape(std::vector(dims.begin(), dims.end()))); m_inputs.push_back(var_place); } else if (op.type() == "fetch") { @@ -226,7 +226,7 @@ std::vector> InputModel::InputModelImpl::determine_cut_ std::vector> new_op_places; new_op_places.reserve(m_op_places[0].size()); // Marking nodes from outputs to inputs/constants - for (const auto& output : 
getOutputs()) { + for (const auto& output : get_outputs()) { if (!output->is_input()) { auto paddle_output_op = std::dynamic_pointer_cast(output->get_producing_operation()); FRONT_END_GENERAL_CHECK(paddle_output_op != nullptr, "Output doesn't have producing operation"); @@ -298,7 +298,7 @@ void InputModel::InputModelImpl::loadConsts(const std::basic_string& folder_w new ::paddle::framework::proto::VarType_TensorDesc()); tensor_desc->ParseFromArray(buf.get(), size); Shape shape(tensor_desc->dims().cbegin(), tensor_desc->dims().cend()); - const auto& type = TYPE_MAP[tensor_desc->data_type()]; + const auto& type = get_ov_type(tensor_desc->data_type()); const auto& data_length = shape_size(shape) * type.size(); std::vector tensor_data(data_length); @@ -344,16 +344,16 @@ InputModel::InputModelImpl::InputModelImpl(const std::basic_string& path, FRONT_END_GENERAL_CHECK( version >= 2000000 || version == 0, "[Frontend]Only Support Paddle greater than 2.0.0, current version " + std::to_string(version)); - loadPlaces(); + load_places(); if (weights_stream && weights_stream.is_open()) { loadConsts(std::basic_string{}, &weights_stream); } else { loadConsts(path, nullptr); } - createTempConsts(); + create_temp_consts(); } -void InputModel::InputModelImpl::createTempConsts() { +void InputModel::InputModelImpl::create_temp_consts() { for (const auto& item : m_var_places) { const auto& var_place = item.second; const auto& var_desc = var_place->get_desc(); @@ -370,7 +370,7 @@ void InputModel::InputModelImpl::createTempConsts() { // shape, we simply the the first dimension be 0. 
if (var_desc.type().has_tensor_array()) { const auto& tensor = var_desc.type().tensor_array().tensor(); - const auto& type = TYPE_MAP[tensor.data_type()]; + const auto& type = get_ov_type(tensor.data_type()); std::cout << "WARNING: The PaddlePaddle model has \"TENSOR_ARRAY\" variables, which is supported " << " under limited situations.\n"; @@ -420,21 +420,21 @@ InputModel::InputModelImpl::InputModelImpl(const std::vector& str FRONT_END_GENERAL_CHECK( version >= 2000000 || version == 0, "[Frontend]Only Support Paddle greater than 2.0.0, current version " + std::to_string(version)); - loadPlaces(); + load_places(); if (streams.size() > 1) loadConsts(std::string(), streams[1]); - createTempConsts(); + create_temp_consts(); } -std::vector InputModel::InputModelImpl::getInputs() const { +std::vector InputModel::InputModelImpl::get_inputs() const { return m_inputs; } -std::vector InputModel::InputModelImpl::getOutputs() const { +std::vector InputModel::InputModelImpl::get_outputs() const { return m_outputs; } -Place::Ptr InputModel::InputModelImpl::getPlaceByTensorName(const std::string& tensorName) const { +Place::Ptr InputModel::InputModelImpl::get_place_by_tensor_name(const std::string& tensorName) const { if (m_var_places.count(tensorName)) return m_var_places.at(tensorName); return nullptr; @@ -454,7 +454,7 @@ std::shared_ptr castToTensorPlace(const Place::Ptr& place) { } // namespace -void InputModel::InputModelImpl::overrideAllInputs(const std::vector& inputs) { +void InputModel::InputModelImpl::override_all_inputs(const std::vector& inputs) { m_graph_changed = true; m_inputs.clear(); for (const auto& inp : inputs) { @@ -462,7 +462,7 @@ void InputModel::InputModelImpl::overrideAllInputs(const std::vector } } -void InputModel::InputModelImpl::overrideAllOutputs(const std::vector& outputs) { +void InputModel::InputModelImpl::override_all_outputs(const std::vector& outputs) { m_graph_changed = true; m_outputs.clear(); for (const auto& outp : outputs) { @@ -470,30 
+470,30 @@ void InputModel::InputModelImpl::overrideAllOutputs(const std::vector& inputs, - const std::vector& outputs) { +void InputModel::InputModelImpl::extract_subgraph(const std::vector& inputs, + const std::vector& outputs) { m_graph_changed = true; - overrideAllInputs(inputs); - overrideAllOutputs(outputs); + override_all_inputs(inputs); + override_all_outputs(outputs); } -void InputModel::InputModelImpl::setDefaultShape(Place::Ptr place, const ov::Shape& shape) { - FRONT_END_NOT_IMPLEMENTED("setDefaultShape"); +void InputModel::InputModelImpl::set_default_shape(Place::Ptr place, const ov::Shape& shape) { + FRONT_END_NOT_IMPLEMENTED("set_default_shape"); } -void InputModel::InputModelImpl::setPartialShape(Place::Ptr place, const ov::PartialShape& p_shape) { +void InputModel::InputModelImpl::set_partial_shape(Place::Ptr place, const ov::PartialShape& p_shape) { castToTensorPlace(place)->set_partial_shape(p_shape); } -ov::PartialShape InputModel::InputModelImpl::getPartialShape(Place::Ptr place) const { +ov::PartialShape InputModel::InputModelImpl::get_partial_shape(Place::Ptr place) const { return castToTensorPlace(place)->get_partial_shape(); } -void InputModel::InputModelImpl::setElementType(Place::Ptr place, const ov::element::Type& type) { +void InputModel::InputModelImpl::set_element_type(Place::Ptr place, const ov::element::Type& type) { castToTensorPlace(place)->set_element_type(type); } -void InputModel::InputModelImpl::setTensorValue(Place::Ptr place, const void* value) { +void InputModel::InputModelImpl::set_tensor_value(Place::Ptr place, const void* value) { m_graph_changed = true; auto tensor_place = castToTensorPlace(place); auto p_shape = tensor_place->get_partial_shape(); @@ -528,43 +528,43 @@ std::map> InputModel::get_tensor_values() const } std::vector InputModel::get_inputs() const { - return _impl->getInputs(); + return _impl->get_inputs(); } std::vector InputModel::get_outputs() const { - return _impl->getOutputs(); + return 
_impl->get_outputs(); } Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const { - return _impl->getPlaceByTensorName(tensorName); + return _impl->get_place_by_tensor_name(tensorName); } void InputModel::override_all_outputs(const std::vector& outputs) { - _impl->overrideAllOutputs(outputs); + _impl->override_all_outputs(outputs); } void InputModel::override_all_inputs(const std::vector& inputs) { - _impl->overrideAllInputs(inputs); + _impl->override_all_inputs(inputs); } void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { - _impl->extractSubgraph(inputs, outputs); + _impl->extract_subgraph(inputs, outputs); } void InputModel::set_partial_shape(const Place::Ptr& place, const ov::PartialShape& p_shape) { - _impl->setPartialShape(place, p_shape); + _impl->set_partial_shape(place, p_shape); } ov::PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const { - return _impl->getPartialShape(place); + return _impl->get_partial_shape(place); } void InputModel::set_element_type(const Place::Ptr& place, const ov::element::Type& type) { - _impl->setElementType(place, type); + _impl->set_element_type(place, type); } void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) { - _impl->setTensorValue(place, value); + _impl->set_tensor_value(place, value); } } // namespace paddle diff --git a/src/frontends/paddle/src/place.cpp b/src/frontends/paddle/src/place.cpp index 8a5a4663235..7af2bc07bbf 100644 --- a/src/frontends/paddle/src/place.cpp +++ b/src/frontends/paddle/src/place.cpp @@ -213,7 +213,7 @@ TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, const auto& var_type = var_desc.type(); if (var_type.type() == ::paddle::framework::proto::VarType::LOD_TENSOR) { const auto& tensor_desc = var_type.lod_tensor().tensor(); - m_type = TYPE_MAP[tensor_desc.data_type()]; + m_type = get_ov_type(tensor_desc.data_type()); m_pshape = 
PartialShape(std::vector(tensor_desc.dims().begin(), tensor_desc.dims().end())); } } diff --git a/src/frontends/pytorch/src/pytorch.cpp b/src/frontends/pytorch/src/pytorch.cpp index fddfd299d85..4756a81cb79 100644 --- a/src/frontends/pytorch/src/pytorch.cpp +++ b/src/frontends/pytorch/src/pytorch.cpp @@ -6,11 +6,11 @@ #include "openvino/frontend/pytorch/frontend.hpp" #include "openvino/frontend/pytorch/visibility.hpp" -PYTORCH_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +PYTORCH_C_API ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -PYTORCH_C_API void* GetFrontEndData() { +PYTORCH_C_API void* get_front_end_data() { auto res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "pytorch"; res->m_creator = []() { diff --git a/src/frontends/tensorflow/src/decoder_argdef.cpp b/src/frontends/tensorflow/src/decoder_argdef.cpp index 0ae1398a073..4f59f851dff 100644 --- a/src/frontends/tensorflow/src/decoder_argdef.cpp +++ b/src/frontends/tensorflow/src/decoder_argdef.cpp @@ -14,31 +14,10 @@ namespace ov { namespace frontend { namespace tensorflow { -namespace { -const std::map<::tensorflow::DataType, ov::element::Type>& TYPE_MAP() { - static const std::map<::tensorflow::DataType, ov::element::Type> type_map{ - {::tensorflow::DataType::DT_BOOL, ov::element::boolean}, - {::tensorflow::DataType::DT_INT16, ov::element::i16}, - {::tensorflow::DataType::DT_INT32, ov::element::i32}, - {::tensorflow::DataType::DT_INT64, ov::element::i64}, - {::tensorflow::DataType::DT_HALF, ov::element::f16}, - {::tensorflow::DataType::DT_FLOAT, ov::element::f32}, - {::tensorflow::DataType::DT_DOUBLE, ov::element::f64}, - {::tensorflow::DataType::DT_UINT8, ov::element::u8}, - {::tensorflow::DataType::DT_INT8, ov::element::i8}, - {::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}}; - return type_map; -} -} // namespace - size_t DecoderArgDef::get_input_size() const { FRONT_END_GENERAL_CHECK(m_op_type == "input_arg" || m_op_type == "output_arg", 
"[TensorFlow Frontend] Internal error: Incorrect use of DecoderArgDef class."); - if (m_op_type == "input_arg") { - return 0; - } else { - return 1; - } + return m_op_type == "input_arg" ? 0 : 1; } const std::string& DecoderArgDef::get_op_type() const { @@ -74,12 +53,7 @@ void DecoderArgDef::get_input_node(size_t input_port_idx, ov::Any DecoderArgDef::get_attribute(const std::string& name) const { FRONT_END_GENERAL_CHECK(name == "type", "[TensorFlow Frontend] Internal error: DecoderArgDef supports only `type` attribute."); - if (TYPE_MAP().count(m_arg_def->type())) { - return TYPE_MAP().at(m_arg_def->type()); - } else { - // for all unsupported types return dynamic type - return ov::element::dynamic; - } + return get_ov_type(m_arg_def->type()); } } // namespace tensorflow diff --git a/src/frontends/tensorflow/src/decoder_proto.cpp b/src/frontends/tensorflow/src/decoder_proto.cpp index ed4cf21bc91..98413e884cd 100644 --- a/src/frontends/tensorflow/src/decoder_proto.cpp +++ b/src/frontends/tensorflow/src/decoder_proto.cpp @@ -15,20 +15,6 @@ namespace frontend { namespace tensorflow { namespace { -const std::map<::tensorflow::DataType, ov::element::Type>& TYPE_MAP() { - static const std::map<::tensorflow::DataType, ov::element::Type> type_map{ - {::tensorflow::DataType::DT_BOOL, ov::element::boolean}, - {::tensorflow::DataType::DT_INT16, ov::element::i16}, - {::tensorflow::DataType::DT_INT32, ov::element::i32}, - {::tensorflow::DataType::DT_INT64, ov::element::i64}, - {::tensorflow::DataType::DT_HALF, ov::element::f16}, - {::tensorflow::DataType::DT_FLOAT, ov::element::f32}, - {::tensorflow::DataType::DT_DOUBLE, ov::element::f64}, - {::tensorflow::DataType::DT_UINT8, ov::element::u8}, - {::tensorflow::DataType::DT_INT8, ov::element::i8}, - {::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}}; - return type_map; -} template void extract_tensor_content(const std::string& tensor_content, ov::Tensor* values) { @@ -96,6 +82,24 @@ void 
extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_p #endif } // namespace +ov::element::Type get_ov_type(const ::tensorflow::DataType& type) { + static const std::map<::tensorflow::DataType, ov::element::Type> type_map{ + {::tensorflow::DataType::DT_BOOL, ov::element::boolean}, + {::tensorflow::DataType::DT_INT16, ov::element::i16}, + {::tensorflow::DataType::DT_INT32, ov::element::i32}, + {::tensorflow::DataType::DT_INT64, ov::element::i64}, + {::tensorflow::DataType::DT_HALF, ov::element::f16}, + {::tensorflow::DataType::DT_FLOAT, ov::element::f32}, + {::tensorflow::DataType::DT_DOUBLE, ov::element::f64}, + {::tensorflow::DataType::DT_UINT8, ov::element::u8}, + {::tensorflow::DataType::DT_INT8, ov::element::i8}, + {::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}}; + + auto it = type_map.find(type); + // for all unsupported types return dynamic type + return it == type_map.end() ? ov::element::dynamic : it->second; +} + ov::Any DecoderProto::get_attribute(const std::string& name) const { auto attrs = decode_attribute_helper(name); if (attrs.empty()) { @@ -125,12 +129,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { } case ::tensorflow::AttrValue::ValueCase::kType: { - if (TYPE_MAP().count(attrs[0].type())) { - return TYPE_MAP().at(attrs[0].type()); - } else { - // for all unsupported types return dynamic type - return ov::element::dynamic; - } + return get_ov_type(attrs[0].type()); } case ::tensorflow::AttrValue::ValueCase::kList: { @@ -169,7 +168,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { if (list.type_size()) { std::vector res; for (int idx = 0; idx < list.type_size(); ++idx) { - res.emplace_back(TYPE_MAP().at(list.type(idx))); + res.emplace_back(get_ov_type(list.type(idx))); } return res; } @@ -194,10 +193,10 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { } FRONT_END_GENERAL_CHECK(pshape.is_static(), "Dynamic shapes are not supported for Tensor 
attribute."); const auto& tf_type = tensor_proto.dtype(); + auto ov_type = get_ov_type(tf_type); FRONT_END_GENERAL_CHECK( - TYPE_MAP().count(tf_type), + ov_type.is_static(), "Encountered unknown element type " + DataType_Name(tf_type) + " on an empty tensor_proto"); - auto ov_type = TYPE_MAP().at(tf_type); ov::Tensor res(ov_type, pshape.get_shape()); auto tensor_content = tensor_proto.tensor_content(); if (!tensor_content.empty() && tensor_proto.has_tensor_shape()) { diff --git a/src/frontends/tensorflow/src/decoder_proto.hpp b/src/frontends/tensorflow/src/decoder_proto.hpp index 6b277ad54e6..db1f1138824 100644 --- a/src/frontends/tensorflow/src/decoder_proto.hpp +++ b/src/frontends/tensorflow/src/decoder_proto.hpp @@ -7,7 +7,9 @@ #include #include +#include "openvino/core/type/element_type.hpp" #include "openvino/frontend/tensorflow/decoder.hpp" +#include "types.pb.h" namespace tensorflow { class NodeDef; @@ -18,6 +20,8 @@ namespace ov { namespace frontend { namespace tensorflow { +ov::element::Type get_ov_type(const ::tensorflow::DataType& type); + void parse_producer_name(const std::string& producer_port_name, std::string& producer_name, size_t& producer_output_port_index, diff --git a/src/frontends/tensorflow/src/input_model.cpp b/src/frontends/tensorflow/src/input_model.cpp index 4b3ca8498fe..f6292da7dc9 100644 --- a/src/frontends/tensorflow/src/input_model.cpp +++ b/src/frontends/tensorflow/src/input_model.cpp @@ -56,18 +56,18 @@ public: InputModelTFImpl(const GraphIterator::Ptr& graph_iterator, const ov::frontend::InputModel& input_model, const std::shared_ptr& telemetry); - std::vector getInputs() const; - std::vector getOutputs() const; - ov::frontend::Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; - void overrideAllOutputs(const std::vector& outputs); - void overrideAllInputs(const std::vector& inputs); - void extractSubgraph(const std::vector& inputs, - const std::vector& outputs); - void setPartialShape(ov::frontend::Place::Ptr 
place, const ov::PartialShape&); - ov::PartialShape getPartialShape(ov::frontend::Place::Ptr place) const; - void setElementType(ov::frontend::Place::Ptr place, const ov::element::Type&); - ov::element::Type getElementType(ov::frontend::Place::Ptr place) const; - void setTensorValue(ov::frontend::Place::Ptr place, const void* value); + std::vector get_inputs() const; + std::vector get_outputs() const; + ov::frontend::Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const; + void override_all_outputs(const std::vector& outputs); + void override_all_inputs(const std::vector& inputs); + void extract_subgraph(const std::vector& inputs, + const std::vector& outputs); + void set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape&); + ov::PartialShape get_partial_shape(ov::frontend::Place::Ptr place) const; + void set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type&); + ov::element::Type get_element_type(ov::frontend::Place::Ptr place) const; + void set_tensor_value(ov::frontend::Place::Ptr place, const void* value); std::vector> get_op_places() const; std::map> get_tensor_places() const { @@ -81,7 +81,7 @@ public: std::vector get_output_names() const; private: - void loadPlaces(); + void load_places(); std::vector> topologically_sort_op_nodes() const; std::vector> m_op_places; @@ -103,7 +103,7 @@ private: bool m_graph_changed = false; }; -void InputModel::InputModelTFImpl::loadPlaces() { +void InputModel::InputModelTFImpl::load_places() { std::set all_op_names; std::set op_names_with_consumers; std::map op_statistics; @@ -325,7 +325,7 @@ InputModel::InputModelTFImpl::InputModelTFImpl(const GraphIterator::Ptr& graph_i : m_graph_iterator(graph_iterator), m_input_model(input_model) { FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator"); - loadPlaces(); + load_places(); } std::shared_ptr InputModel::InputModelTFImpl::get_body_input_model( @@ -346,18 +346,18 @@ 
InputModel::InputModelTFImpl::InputModelTFImpl(const GraphIterator::Ptr& graph_i FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator"); m_input_names = graph_iterator->get_input_names(); m_output_names = graph_iterator->get_output_names(); - loadPlaces(); + load_places(); } -std::vector InputModel::InputModelTFImpl::getInputs() const { +std::vector InputModel::InputModelTFImpl::get_inputs() const { return m_inputs; } -std::vector InputModel::InputModelTFImpl::getOutputs() const { +std::vector InputModel::InputModelTFImpl::get_outputs() const { return m_outputs; } -ov::frontend::Place::Ptr InputModel::InputModelTFImpl::getPlaceByTensorName(const std::string& tensorName) const { +ov::frontend::Place::Ptr InputModel::InputModelTFImpl::get_place_by_tensor_name(const std::string& tensorName) const { if (m_tensor_places.find(tensorName) != m_tensor_places.end()) return m_tensor_places.at(tensorName); @@ -389,7 +389,7 @@ std::shared_ptr castToTensorPlace(const ov::frontend::Place::Ptr& p FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlaceTF."); } -void InputModel::InputModelTFImpl::overrideAllInputs(const std::vector& inputs) { +void InputModel::InputModelTFImpl::override_all_inputs(const std::vector& inputs) { m_graph_changed = true; m_inputs.clear(); for (const auto& input_place : inputs) { @@ -397,7 +397,7 @@ void InputModel::InputModelTFImpl::overrideAllInputs(const std::vector& outputs) { +void InputModel::InputModelTFImpl::override_all_outputs(const std::vector& outputs) { m_graph_changed = true; m_outputs.clear(); for (const auto& output_place : outputs) { @@ -405,30 +405,30 @@ void InputModel::InputModelTFImpl::overrideAllOutputs(const std::vector& inputs, - const std::vector& outputs) { +void InputModel::InputModelTFImpl::extract_subgraph(const std::vector& inputs, + const std::vector& outputs) { m_graph_changed = true; - overrideAllInputs(inputs); - overrideAllOutputs(outputs); + override_all_inputs(inputs); + 
override_all_outputs(outputs); } -void InputModel::InputModelTFImpl::setPartialShape(ov::frontend::Place::Ptr place, const ov::PartialShape& p_shape) { +void InputModel::InputModelTFImpl::set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape& p_shape) { castToTensorPlace(place)->set_partial_shape(p_shape); } -ov::PartialShape InputModel::InputModelTFImpl::getPartialShape(ov::frontend::Place::Ptr place) const { +ov::PartialShape InputModel::InputModelTFImpl::get_partial_shape(ov::frontend::Place::Ptr place) const { return castToTensorPlace(place)->get_partial_shape(); } -void InputModel::InputModelTFImpl::setElementType(ov::frontend::Place::Ptr place, const ov::element::Type& type) { +void InputModel::InputModelTFImpl::set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type& type) { castToTensorPlace(place)->set_element_type(type); } -ov::element::Type InputModel::InputModelTFImpl::getElementType(ov::frontend::Place::Ptr place) const { +ov::element::Type InputModel::InputModelTFImpl::get_element_type(ov::frontend::Place::Ptr place) const { return castToTensorPlace(place)->get_element_type(); } -void InputModel::InputModelTFImpl::setTensorValue(ov::frontend::Place::Ptr place, const void* value) { +void InputModel::InputModelTFImpl::set_tensor_value(ov::frontend::Place::Ptr place, const void* value) { m_graph_changed = true; auto tensor_place = castToTensorPlace(place); auto p_shape = tensor_place->get_partial_shape(); @@ -473,48 +473,48 @@ std::map> InputModel::get_tensor_values() const { } std::vector InputModel::get_inputs() const { - return _impl->getInputs(); + return _impl->get_inputs(); } std::vector InputModel::get_outputs() const { - return _impl->getOutputs(); + return _impl->get_outputs(); } ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const { - return _impl->getPlaceByTensorName(tensorName); + return _impl->get_place_by_tensor_name(tensorName); } void 
InputModel::override_all_outputs(const std::vector& outputs) { - _impl->overrideAllOutputs(outputs); + _impl->override_all_outputs(outputs); } void InputModel::override_all_inputs(const std::vector& inputs) { - _impl->overrideAllInputs(inputs); + _impl->override_all_inputs(inputs); } void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { - _impl->extractSubgraph(inputs, outputs); + _impl->extract_subgraph(inputs, outputs); } void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& p_shape) { - _impl->setPartialShape(place, p_shape); + _impl->set_partial_shape(place, p_shape); } ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const { - return _impl->getPartialShape(place); + return _impl->get_partial_shape(place); } void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) { - _impl->setElementType(place, type); + _impl->set_element_type(place, type); } ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& place) const { - return _impl->getElementType(place); + return _impl->get_element_type(place); } void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) { - _impl->setTensorValue(place, value); + _impl->set_tensor_value(place, value); } } // namespace tensorflow diff --git a/src/frontends/tensorflow/src/tensorflow.cpp b/src/frontends/tensorflow/src/tensorflow.cpp index 68708de2db5..72162f06444 100644 --- a/src/frontends/tensorflow/src/tensorflow.cpp +++ b/src/frontends/tensorflow/src/tensorflow.cpp @@ -6,11 +6,11 @@ #include "openvino/frontend/tensorflow/frontend.hpp" #include "openvino/frontend/tensorflow/visibility.hpp" -TENSORFLOW_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +TENSORFLOW_C_API ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -TENSORFLOW_C_API void* GetFrontEndData() { 
+TENSORFLOW_C_API void* get_front_end_data() { auto res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "tf"; res->m_creator = []() { diff --git a/src/frontends/tensorflow_common/src/CMakeLists.txt b/src/frontends/tensorflow_common/src/CMakeLists.txt index b09f3cd7118..b11f4cb1d4d 100644 --- a/src/frontends/tensorflow_common/src/CMakeLists.txt +++ b/src/frontends/tensorflow_common/src/CMakeLists.txt @@ -27,3 +27,10 @@ target_include_directories(${TARGET_NAME} add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) + +ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME} + SOURCE_DIRECTORIES "${root_dir}/include" + "${root_dir}/src" + ADDITIONAL_INCLUDE_DIRECTORIES + $ + $) \ No newline at end of file diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index d5db1d27141..fe5bc351ada 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -5,6 +5,7 @@ #include "decoder_flatbuffer.h" #include "schema_generated.h" +#include "utils.hpp" namespace ov { namespace frontend { @@ -68,21 +69,21 @@ ov::element::Type DecoderFlatBuffer::get_output_tensor_type(size_t idx) const { std::shared_ptr DecoderFlatBuffer::decode_input_tensor( size_t idx, - const InputModel& model) const { + const ov::frontend::InputModel& model) const { FRONT_END_GENERAL_CHECK(idx < get_input_size(), "Requested input is out-of-range"); return decode_tensor(m_input_info.at(idx), model); } std::shared_ptr DecoderFlatBuffer::decode_output_tensor( size_t idx, - const InputModel& model) const { + const ov::frontend::InputModel& model) const { FRONT_END_GENERAL_CHECK(idx < get_output_size(), "Requested output is out-of-range"); return decode_tensor(m_output_info.at(idx), model); } std::shared_ptr DecoderFlatBuffer::decode_tensor( const ov::frontend::tensorflow_lite::TensorInfo& 
tensor_info, - const InputModel& model) const { + const ov::frontend::InputModel& model) const { const auto tensor = tensor_info.tensor; std::vector names = {tensor->name()->str()}; diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h index 79dfe027c0b..92325596b70 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h @@ -43,8 +43,6 @@ public: return (opts->*member)(); } - - ov::Any get_attribute(const std::string& name) const override { return {}; } @@ -67,9 +65,11 @@ public: const std::string& get_op_type() const override; const std::string& get_op_name() const override; - std::shared_ptr decode_input_tensor(size_t idx, const InputModel& model) const; + std::shared_ptr decode_input_tensor(size_t idx, + const ov::frontend::InputModel& model) const; - std::shared_ptr decode_output_tensor(size_t idx, const InputModel& model) const; + std::shared_ptr decode_output_tensor(size_t idx, + const ov::frontend::InputModel& model) const; private: std::shared_ptr decode_tensor( diff --git a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp index 1f488e8cb75..cc093066956 100644 --- a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp @@ -2,7 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "graph_iterator_flatbuffer.hpp" + +#include + +#include "decoder_flatbuffer.h" using namespace ov::frontend::tensorflow_lite; diff --git a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp index 3e5f37ecbe8..497d2b4fae8 100644 --- a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp +++ b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp @@ -6,7 +6,6 @@ 
#include -#include "decoder_flatbuffer.h" #include "openvino/frontend/exception.hpp" #include "openvino/util/file_util.hpp" #include "schema_generated.h" diff --git a/src/frontends/tensorflow_lite/src/input_model.cpp b/src/frontends/tensorflow_lite/src/input_model.cpp index 8c6a8763791..274b573fab9 100644 --- a/src/frontends/tensorflow_lite/src/input_model.cpp +++ b/src/frontends/tensorflow_lite/src/input_model.cpp @@ -26,9 +26,9 @@ public: InputModelTFLiteImpl(const GraphIteratorFlatBuffer::Ptr& graph_iterator, const ov::frontend::InputModel& input_model, const std::shared_ptr& telemetry); - std::vector getInputs() const; - std::vector getOutputs() const; - ov::frontend::Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; + std::vector get_inputs() const; + std::vector get_outputs() const; + ov::frontend::Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const; ///// Searching for places ///// std::vector> get_op_places() const { @@ -42,26 +42,26 @@ public: } ///// Naming and annotation ///// - void setNameForTensor(const Place::Ptr& tensor, const std::string& new_name); - void addNameForTensor(const Place::Ptr& tensor, const std::string& new_name); - void setNameForOperation(const Place::Ptr& operation, const std::string& new_name); + void set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name); + void add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name); + void set_name_for_operation(const Place::Ptr& operation, const std::string& new_name); ///// Setting / getting tensor properties ///// - void setPartialShape(ov::frontend::Place::Ptr place, const ov::PartialShape& shape); - ov::PartialShape getPartialShape(ov::frontend::Place::Ptr place) const; - void setElementType(ov::frontend::Place::Ptr place, const ov::element::Type& type); - ov::element::Type getElementType(ov::frontend::Place::Ptr place) const; - void setTensorValue(ov::frontend::Place::Ptr place, const void* value); + void 
set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape& shape); + ov::PartialShape get_partial_shape(ov::frontend::Place::Ptr place) const; + void set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type& type); + ov::element::Type get_element_type(ov::frontend::Place::Ptr place) const; + void set_tensor_value(ov::frontend::Place::Ptr place, const void* value); ///// Topology Editing ///// - void overrideAllOutputs(const std::vector& outputs); - void overrideAllInputs(const std::vector& inputs); - void extractSubgraph(const std::vector& inputs, - const std::vector& outputs); + void override_all_outputs(const std::vector& outputs); + void override_all_inputs(const std::vector& inputs); + void extract_subgraph(const std::vector& inputs, + const std::vector& outputs); private: - void loadModel(); - void cleanUp(); + void load_model(); + void clean_up(); std::vector> m_op_places; std::map> m_op_places_map; @@ -76,7 +76,7 @@ private: std::shared_ptr m_telemetry; }; -void InputModel::InputModelTFLiteImpl::loadModel() { +void InputModel::InputModelTFLiteImpl::load_model() { std::map op_statistics; // for telemetry m_op_places.reserve(m_graph_iterator->size()); @@ -169,7 +169,7 @@ InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBu : m_graph_iterator(graph_iterator), m_input_model(input_model) { FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator"); - loadModel(); + load_model(); } InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBuffer::Ptr& graph_iterator, @@ -179,14 +179,14 @@ InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBu m_input_model(input_model), m_telemetry(telemetry) { FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator"); - loadModel(); + load_model(); } -std::vector InputModel::InputModelTFLiteImpl::getInputs() const { +std::vector 
InputModel::InputModelTFLiteImpl::get_inputs() const { return m_inputs; } -std::vector InputModel::InputModelTFLiteImpl::getOutputs() const { +std::vector InputModel::InputModelTFLiteImpl::get_outputs() const { return m_outputs; } @@ -197,7 +197,8 @@ std::shared_ptr castToTensorPlace(const ov::frontend::Place::Ptr& p FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlace."); } -ov::frontend::Place::Ptr InputModel::InputModelTFLiteImpl::getPlaceByTensorName(const std::string& tensorName) const { +ov::frontend::Place::Ptr InputModel::InputModelTFLiteImpl::get_place_by_tensor_name( + const std::string& tensorName) const { if (m_tensor_places.find(tensorName) != m_tensor_places.end()) return castToTensorPlace(m_tensor_places.at(tensorName)); else @@ -211,23 +212,23 @@ std::shared_ptr castToOpPlace(const ov::frontend::Place::Ptr& place) { FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlace."); } -void InputModel::InputModelTFLiteImpl::setPartialShape(ov::frontend::Place::Ptr place, const PartialShape& shape) { +void InputModel::InputModelTFLiteImpl::set_partial_shape(ov::frontend::Place::Ptr place, const PartialShape& shape) { castToTensorPlace(place)->set_partial_shape(shape); } -ov::PartialShape InputModel::InputModelTFLiteImpl::getPartialShape(ov::frontend::Place::Ptr place) const { +ov::PartialShape InputModel::InputModelTFLiteImpl::get_partial_shape(ov::frontend::Place::Ptr place) const { return castToTensorPlace(place)->get_partial_shape(); } -void InputModel::InputModelTFLiteImpl::setElementType(ov::frontend::Place::Ptr place, const element::Type& type) { +void InputModel::InputModelTFLiteImpl::set_element_type(ov::frontend::Place::Ptr place, const element::Type& type) { castToTensorPlace(place)->set_element_type(type); } -ov::element::Type InputModel::InputModelTFLiteImpl::getElementType(ov::frontend::Place::Ptr place) const { +ov::element::Type InputModel::InputModelTFLiteImpl::get_element_type(ov::frontend::Place::Ptr place) 
const { return castToTensorPlace(place)->get_element_type(); } -void InputModel::InputModelTFLiteImpl::setTensorValue(ov::frontend::Place::Ptr place, const void* value) { +void InputModel::InputModelTFLiteImpl::set_tensor_value(ov::frontend::Place::Ptr place, const void* value) { auto tensor_place = castToTensorPlace(place); auto p_shape = tensor_place->get_partial_shape(); auto type = tensor_place->get_element_type(); @@ -243,25 +244,26 @@ void InputModel::InputModelTFLiteImpl::setTensorValue(ov::frontend::Place::Ptr p m_tensor_values[name] = constant; } -void InputModel::InputModelTFLiteImpl::setNameForTensor(const Place::Ptr& tensor, const std::string& new_name) { +void InputModel::InputModelTFLiteImpl::set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { castToTensorPlace(tensor)->set_names({new_name}); } -void InputModel::InputModelTFLiteImpl::addNameForTensor(const Place::Ptr& tensor, const std::string& new_name) { +void InputModel::InputModelTFLiteImpl::add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { auto tf_tensor = castToTensorPlace(tensor); auto names = tf_tensor->get_names(); names.push_back(new_name); tf_tensor->set_names(names); } -void InputModel::InputModelTFLiteImpl::setNameForOperation(const Place::Ptr& operation, const std::string& new_name) { +void InputModel::InputModelTFLiteImpl::set_name_for_operation(const Place::Ptr& operation, + const std::string& new_name) { auto op = castToOpPlace(operation); auto names = op->get_names(); names.push_back(new_name); op->set_names(names); } -void InputModel::InputModelTFLiteImpl::overrideAllInputs(const std::vector& inputs) { +void InputModel::InputModelTFLiteImpl::override_all_inputs(const std::vector& inputs) { for (const auto& input_place : m_inputs) { auto input_lite_place = std::dynamic_pointer_cast(input_place); FRONT_END_GENERAL_CHECK(input_lite_place != nullptr, "Input Model has unexpected place as input"); @@ -271,10 +273,10 @@ void 
InputModel::InputModelTFLiteImpl::overrideAllInputs(const std::vector& outputs) { +void InputModel::InputModelTFLiteImpl::override_all_outputs(const std::vector& outputs) { for (const auto& output_place : m_outputs) { auto output_lite_place = std::dynamic_pointer_cast(output_place); @@ -285,11 +287,11 @@ void InputModel::InputModelTFLiteImpl::overrideAllOutputs(const std::vector& inputs, - const std::vector& outputs) { +void InputModel::InputModelTFLiteImpl::extract_subgraph(const std::vector& inputs, + const std::vector& outputs) { for (const auto& input_place : m_inputs) { auto input_lite_place = std::dynamic_pointer_cast(input_place); FRONT_END_GENERAL_CHECK(input_lite_place != nullptr, "Input Model has unexpected place as input"); @@ -309,10 +311,10 @@ void InputModel::InputModelTFLiteImpl::extractSubgraph(const std::vector> InputModel::get_tensor_values() const { } std::vector InputModel::get_inputs() const { - return _impl->getInputs(); + return _impl->get_inputs(); } std::vector InputModel::get_outputs() const { - return _impl->getOutputs(); + return _impl->get_outputs(); } ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const { - return _impl->getPlaceByTensorName(tensorName); + return _impl->get_place_by_tensor_name(tensorName); } void InputModel::set_partial_shape(const Place::Ptr& place, const PartialShape& shape) { - _impl->setPartialShape(place, shape); + _impl->set_partial_shape(place, shape); } ov::PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const { - return _impl->getPartialShape(place); + return _impl->get_partial_shape(place); } void InputModel::set_element_type(const Place::Ptr& place, const element::Type& type) { - _impl->setElementType(place, type); + _impl->set_element_type(place, type); } ov::element::Type InputModel::get_element_type(const Place::Ptr& place) const { - return _impl->getElementType(place); + return _impl->get_element_type(place); } void 
InputModel::set_tensor_value(const Place::Ptr& place, const void* value) { - _impl->setTensorValue(place, value); + _impl->set_tensor_value(place, value); } void InputModel::set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { - _impl->setNameForTensor(tensor, new_name); + _impl->set_name_for_tensor(tensor, new_name); } void InputModel::add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) { - _impl->addNameForTensor(tensor, new_name); + _impl->add_name_for_tensor(tensor, new_name); } void InputModel::set_name_for_operation(const Place::Ptr& operation, const std::string& new_name) { - _impl->setNameForOperation(operation, new_name); + _impl->set_name_for_operation(operation, new_name); } void InputModel::override_all_outputs(const std::vector& outputs) { - _impl->overrideAllOutputs(outputs); + _impl->override_all_outputs(outputs); } void InputModel::override_all_inputs(const std::vector& inputs) { - _impl->overrideAllInputs(inputs); + _impl->override_all_inputs(inputs); } void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { - _impl->extractSubgraph(inputs, outputs); + _impl->extract_subgraph(inputs, outputs); } } // namespace tensorflow_lite diff --git a/src/frontends/tensorflow_lite/src/input_model.hpp b/src/frontends/tensorflow_lite/src/input_model.hpp index 8b4b415a36b..990cf41efc7 100644 --- a/src/frontends/tensorflow_lite/src/input_model.hpp +++ b/src/frontends/tensorflow_lite/src/input_model.hpp @@ -3,9 +3,10 @@ // #pragma once + #include "graph_iterator_flatbuffer.hpp" -#include "input_model.hpp" #include "openvino/frontend/extension/telemetry.hpp" +#include "openvino/frontend/input_model.hpp" #include "openvino/frontend/tensorflow_lite/frontend.hpp" #include "openvino/opsets/opset1.hpp" #include "tensor_lite_place.hpp" diff --git a/src/frontends/tensorflow_lite/src/tensor_lite_place.cpp b/src/frontends/tensorflow_lite/src/tensor_lite_place.cpp index 32b00b5f929..240acc10bfc 
100644 --- a/src/frontends/tensorflow_lite/src/tensor_lite_place.cpp +++ b/src/frontends/tensorflow_lite/src/tensor_lite_place.cpp @@ -1,9 +1,11 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // + #include "tensor_lite_place.hpp" #include "quantization_info.hpp" +#include "utils.hpp" void ov::frontend::tensorflow_lite::TensorLitePlace::translate(ov::Output& output, bool convert_tensor_attrs_to_nodes) { diff --git a/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp b/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp index 9443d3cb9a7..324d008d80a 100644 --- a/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp +++ b/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp @@ -10,7 +10,6 @@ #include "openvino/frontend/tensorflow_lite/visibility.hpp" #include "place.hpp" #include "quantization_info.hpp" -#include "utils.hpp" namespace ov { namespace frontend { diff --git a/src/frontends/tensorflow_lite/src/tensorflow_lite.cpp b/src/frontends/tensorflow_lite/src/tensorflow_lite.cpp index dbfc22da76f..3c4046190f8 100644 --- a/src/frontends/tensorflow_lite/src/tensorflow_lite.cpp +++ b/src/frontends/tensorflow_lite/src/tensorflow_lite.cpp @@ -6,11 +6,11 @@ #include "openvino/frontend/tensorflow_lite/frontend.hpp" #include "openvino/frontend/tensorflow_lite/visibility.hpp" -TENSORFLOW_LITE_C_API ov::frontend::FrontEndVersion GetAPIVersion() { +TENSORFLOW_LITE_C_API ov::frontend::FrontEndVersion get_api_version() { return OV_FRONTEND_API_VERSION; } -TENSORFLOW_LITE_C_API void* GetFrontEndData() { +TENSORFLOW_LITE_C_API void* get_front_end_data() { auto res = new ov::frontend::FrontEndPluginInfo(); res->m_name = "tflite"; res->m_creator = []() { diff --git a/src/frontends/tensorflow_lite/src/utils.cpp b/src/frontends/tensorflow_lite/src/utils.cpp index 1a4327c7c71..e69b0d056a5 100644 --- a/src/frontends/tensorflow_lite/src/utils.cpp +++ b/src/frontends/tensorflow_lite/src/utils.cpp @@ -14,14 +14,14 @@ using namespace 
ov; std::shared_ptr ov::frontend::tensorflow_lite::get_quantization( const tflite::QuantizationParameters* tf_quantization) { - if (tf_quantization == NULL) + if (tf_quantization == nullptr) return {}; auto quantization = std::make_shared(); auto tf_zp = tf_quantization->zero_point(); auto tf_scale = tf_quantization->scale(); - if (tf_zp != NULL) + if (tf_zp != nullptr) quantization->set_zero_point({(*tf_zp).begin(), (*tf_zp).end()}); - if (tf_scale != NULL) + if (tf_scale != nullptr) quantization->set_scale({(*tf_scale).begin(), (*tf_scale).end()}); if (quantization->get_zero_point().empty() && quantization->get_scale().empty()) return {}; @@ -29,8 +29,7 @@ std::shared_ptr ov::frontend::t return quantization; } -namespace { -const std::map& TYPE_MAP() { +ov::element::Type ov::frontend::tensorflow_lite::get_ov_type(const tflite::TensorType& tf_type) { static const std::map type_map{ {tflite::TensorType_FLOAT32, element::f32}, {tflite::TensorType_FLOAT16, element::f16}, @@ -52,16 +51,9 @@ const std::map& TYPE_MAP() { // {TensorType_RESOURCE, element::resource}, // {TensorType_VARIANT, element::variant}, }; - return type_map; -} -} // namespace - -ov::element::Type ov::frontend::tensorflow_lite::get_ov_type(const tflite::TensorType& tf_type) { - const auto& mapping = TYPE_MAP(); - FRONT_END_GENERAL_CHECK(mapping.find(tf_type) != mapping.end(), - "Unexpected type: ", - tflite::EnumNameTensorType(tf_type)); - return mapping.at(tf_type); + auto it = type_map.find(tf_type); + FRONT_END_GENERAL_CHECK(it != type_map.end(), "Unexpected type: ", tflite::EnumNameTensorType(tf_type)); + return it->second; } ov::PartialShape ov::frontend::tensorflow_lite::get_ov_shape(const flatbuffers::Vector* tf_shape, diff --git a/src/frontends/tensorflow_lite/src/utils.hpp b/src/frontends/tensorflow_lite/src/utils.hpp index f028c91801c..7d4f3ecdd20 100644 --- a/src/frontends/tensorflow_lite/src/utils.hpp +++ b/src/frontends/tensorflow_lite/src/utils.hpp @@ -3,6 +3,7 @@ // #pragma once + 
#include "decoder_flatbuffer.h" #include "place.hpp" #include "quantization_info.hpp" diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt index f3f436e57d5..38363473eb1 100644 --- a/src/inference/CMakeLists.txt +++ b/src/inference/CMakeLists.txt @@ -117,7 +117,7 @@ file(GLOB_RECURSE plugin_api_src "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/*.hpp" add_clang_format_target(${TARGET_NAME}_plugin_api_clang FOR_SOURCES ${plugin_api_src}) ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_plugin_api - SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/openvino" + SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/openvino" ADDITIONAL_INCLUDE_DIRECTORIES $) # Create object library @@ -177,7 +177,7 @@ target_include_directories(${TARGET_NAME} INTERFACE $) # Static library used for unit tests which are always built diff --git a/src/inference/dev_api/xml_parse_utils.h b/src/inference/dev_api/xml_parse_utils.h index 606ff4ae4fd..b7386737276 100644 --- a/src/inference/dev_api/xml_parse_utils.h +++ b/src/inference/dev_api/xml_parse_utils.h @@ -36,7 +36,13 @@ * @brief XML helpers function to extract values from `pugi::xml_node` * @ingroup ie_dev_api_xml */ -namespace XMLParseUtils { +namespace pugixml { + +/** + * @brief XML helpers function to extract values from `pugi::xml_node` + * @ingroup ie_dev_api_xml + */ +namespace utils { /** * @brief Gets the integer attribute from `pugi::xml_node` @@ -217,7 +223,8 @@ GetPrecisionAttr(const pugi::xml_node& node, const char* str, InferenceEngine::P */ INFERENCE_ENGINE_API_CPP(int) GetIntChild(const pugi::xml_node& node, const char* str, int defVal); -} // namespace XMLParseUtils +} // namespace utils +} // namespace pugixml /** * @brief A XML parse result structure with an error message and the `pugi::xml_document` document. 
diff --git a/src/inference/src/compilation_context.cpp b/src/inference/src/compilation_context.cpp index b0e69110ac8..3668a4d932e 100644 --- a/src/inference/src/compilation_context.cpp +++ b/src/inference/src/compilation_context.cpp @@ -209,8 +209,8 @@ std::istream& operator>>(std::istream& stream, CompiledBlobHeader& header) { } pugi::xml_node compiledBlobNode = document.document_element(); - header.m_ieVersion = XMLParseUtils::GetStrAttr(compiledBlobNode, "ie_version"); - header.m_fileInfo = XMLParseUtils::GetStrAttr(compiledBlobNode, "file_info"); + header.m_ieVersion = pugixml::utils::GetStrAttr(compiledBlobNode, "ie_version"); + header.m_fileInfo = pugixml::utils::GetStrAttr(compiledBlobNode, "file_info"); return stream; } diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 15e61fc32ac..12e7c1274ec 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -74,7 +74,7 @@ void ov::CoreImpl::register_plugins_in_registry(const std::string& xml_config_fi pugi::xml_document& xmlDoc = *parse_result.xml; - using namespace XMLParseUtils; + using namespace pugixml::utils; pugi::xml_node ieNode = xmlDoc.document_element(); pugi::xml_node devicesNode = ieNode.child("plugins"); diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index 1e95b78988c..6baf232df80 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -159,7 +159,7 @@ void registerReaders() { } void assertIfIRv7LikeModel(std::istream& modelStream) { - auto irVersion = details::GetIRVersion(modelStream); + auto irVersion = details::get_ir_version(modelStream); bool isIRv7 = irVersion > 1 && irVersion <= 7; if (!isIRv7 || reader_irv7) diff --git a/src/inference/src/xml_parse_utils.cpp b/src/inference/src/xml_parse_utils.cpp index c5f934b4afb..3b753105951 100644 --- a/src/inference/src/xml_parse_utils.cpp +++ b/src/inference/src/xml_parse_utils.cpp 
@@ -12,7 +12,7 @@ #include "ie_precision.hpp" -int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str) { +int pugixml::utils::GetIntAttr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -27,7 +27,7 @@ int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str) { return int_value; } -int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str) { +int64_t pugixml::utils::GetInt64Attr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -42,7 +42,7 @@ int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str) return static_cast(int_value); } -uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* str) { +uint64_t pugixml::utils::GetUInt64Attr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -57,7 +57,7 @@ uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* st return static_cast(int_value); } -unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char* str) { +unsigned int pugixml::utils::GetUIntAttr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -72,7 +72,7 @@ unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char* return static_cast(int_value); } -std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* str) { +std::string pugixml::utils::GetStrAttr(const pugi::xml_node& node, 
const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: '" << str << "' at offset " @@ -80,14 +80,14 @@ std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* st return attr.value(); } -std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* str, const char* def) { +std::string pugixml::utils::GetStrAttr(const pugi::xml_node& node, const char* str, const char* def) { auto attr = node.attribute(str); if (attr.empty()) return def; return attr.value(); } -bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str, const bool def) { +bool pugixml::utils::GetBoolAttr(const pugi::xml_node& node, const char* str, const bool def) { auto attr = node.attribute(str); if (attr.empty()) return def; @@ -108,7 +108,7 @@ bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str, con return is_true; } -bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str) { +bool pugixml::utils::GetBoolAttr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -130,7 +130,7 @@ bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str) { return is_true; } -float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str) { +float pugixml::utils::GetFloatAttr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -147,7 +147,7 @@ float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str) { return float_value; } -InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node& node, const char* str) { +InferenceEngine::Precision 
pugixml::utils::GetPrecisionAttr(const pugi::xml_node& node, const char* str) { auto attr = node.attribute(str); if (attr.empty()) IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset " @@ -155,51 +155,51 @@ InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node& return InferenceEngine::Precision::FromStr(attr.value()); } -InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node& node, - const char* str, - InferenceEngine::Precision def) { +InferenceEngine::Precision pugixml::utils::GetPrecisionAttr(const pugi::xml_node& node, + const char* str, + InferenceEngine::Precision def) { auto attr = node.attribute(str); if (attr.empty()) return InferenceEngine::Precision(def); return InferenceEngine::Precision::FromStr(attr.value()); } -int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str, int defVal) { +int pugixml::utils::GetIntAttr(const pugi::xml_node& node, const char* str, int defVal) { auto attr = node.attribute(str); if (attr.empty()) return defVal; return GetIntAttr(node, str); } -int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str, int64_t defVal) { +int64_t pugixml::utils::GetInt64Attr(const pugi::xml_node& node, const char* str, int64_t defVal) { auto attr = node.attribute(str); if (attr.empty()) return defVal; return GetInt64Attr(node, str); } -uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* str, uint64_t defVal) { +uint64_t pugixml::utils::GetUInt64Attr(const pugi::xml_node& node, const char* str, uint64_t defVal) { auto attr = node.attribute(str); if (attr.empty()) return defVal; return GetUInt64Attr(node, str); } -unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char* str, unsigned int defVal) { +unsigned int pugixml::utils::GetUIntAttr(const pugi::xml_node& node, const char* str, unsigned int defVal) { auto attr = node.attribute(str); if (attr.empty()) return 
defVal; return GetUIntAttr(node, str); } -float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str, float defVal) { +float pugixml::utils::GetFloatAttr(const pugi::xml_node& node, const char* str, float defVal) { auto attr = node.attribute(str); if (attr.empty()) return defVal; return GetFloatAttr(node, str); } -int XMLParseUtils::GetIntChild(const pugi::xml_node& node, const char* str, int defVal) { +int pugixml::utils::GetIntChild(const pugi::xml_node& node, const char* str, int defVal) { auto child = node.child(str); if (child.empty()) return defVal; diff --git a/src/plugins/hetero/executable_network.cpp b/src/plugins/hetero/executable_network.cpp index 6f37bdd7c3a..989cf8ad3d4 100644 --- a/src/plugins/hetero/executable_network.cpp +++ b/src/plugins/hetero/executable_network.cpp @@ -467,7 +467,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel, IE_THROW(NetworkNotRead) << "Error reading HETERO device xml header"; } - using namespace XMLParseUtils; + using namespace pugixml::utils; pugi::xml_node heteroNode = heteroXmlDoc.document_element(); _name = GetStrAttr(heteroNode, "name"); diff --git a/src/plugins/intel_gna/legacy/include/legacy/ie_ir_version.hpp b/src/plugins/intel_gna/legacy/include/legacy/ie_ir_version.hpp index b958d5fe857..eb532d448cc 100644 --- a/src/plugins/intel_gna/legacy/include/legacy/ie_ir_version.hpp +++ b/src/plugins/intel_gna/legacy/include/legacy/ie_ir_version.hpp @@ -10,8 +10,8 @@ namespace InferenceEngine { namespace details { -inline size_t GetIRVersion(pugi::xml_node& root) { - return XMLParseUtils::GetUIntAttr(root, "version", 0); +inline size_t get_ir_version(pugi::xml_node& root) { + return pugixml::utils::GetUIntAttr(root, "version", 0); } /** @@ -19,7 +19,7 @@ inline size_t GetIRVersion(pugi::xml_node& root) { * @param model Models stream * @return IR version, 0 if model does represent IR */ -inline size_t GetIRVersion(std::istream& model) { +inline size_t 
get_ir_version(std::istream& model) { std::array<char, 512> header = {}; model.seekg(0, model.beg); @@ -38,7 +38,7 @@ inline size_t GetIRVersion(std::istream& model) { std::transform(node_name.begin(), node_name.end(), node_name.begin(), ::tolower); if (node_name == "net") { - return GetIRVersion(root); + return get_ir_version(root); } } diff --git a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp index 61f6edeef00..6fbb7611734 100644 --- a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp +++ b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_cnn_net_reader_impl.cpp @@ -47,7 +47,7 @@ StatusCode CNNNetReaderImpl::SetWeights(const TBlob<uint8_t>::Ptr& weights, Resp } static size_t GetFileVersion(pugi::xml_node& root) { - return XMLParseUtils::GetUIntAttr(root, "version", 0); + return pugixml::utils::GetUIntAttr(root, "version", 0); } StatusCode CNNNetReaderImpl::ReadNetwork(const void* model, size_t size, ResponseDesc* resp) noexcept { diff --git a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_format_parser.cpp b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_format_parser.cpp index 6ec142ad7f5..f515c6c956b 100644 --- a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_format_parser.cpp +++ b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_format_parser.cpp @@ -16,7 +16,7 @@ using namespace InferenceEngine; using namespace InferenceEngine::details; -using namespace XMLParseUtils; +using namespace pugixml::utils; using namespace std; void LayerParseParameters::addOutputPort(const LayerPortData& port) { @@ -72,7 +72,7 @@ void FormatParser::ParseGenericParams(pugi::xml_node& node, LayerParseParameters layerParsePrms.underIRVersion = _version; InferenceEngine::LayerParams& prms = layerParsePrms.prms; - prms.type = XMLParseUtils::GetStrAttr(node, "type"); 
+ prms.type = pugixml::utils::GetStrAttr(node, "type"); prms.precision = _defPrecision; prms.name = GetStrAttr(node, "name"); diff --git a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_ir_reader.cpp b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_ir_reader.cpp index 207cdbe87f9..c67dab31885 100644 --- a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_ir_reader.cpp +++ b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_ir_reader.cpp @@ -20,7 +20,7 @@ using namespace InferenceEngine; bool IRReader::supportModel(std::istream& model) const { OV_ITT_SCOPED_TASK(itt::domains::V7Reader, "IRReader::supportModel"); - auto version = details::GetIRVersion(model); + auto version = details::get_ir_version(model); return version > 1 && version <= 7; } @@ -39,7 +39,7 @@ CNNNetwork IRReader::read(std::istream& model, } pugi::xml_node root = xmlDoc.document_element(); - auto version = details::GetIRVersion(root); + auto version = details::get_ir_version(root); IRParser parser(version, exts); return CNNNetwork(parser.parse(root, weights)); } diff --git a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_layer_parsers.h b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_layer_parsers.h index e5449aa0f6b..fb9a9f2c069 100644 --- a/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_layer_parsers.h +++ b/src/plugins/intel_gna/tests/deprecated/readers/ir_reader_v7/ie_layer_parsers.h @@ -26,7 +26,7 @@ inline pugi::xml_node GetChild(const pugi::xml_node& node, std::vectorm_name = "openvino_mock_mo_frontend";