Added NCC style for frontends sources (#16200)

* Ability to provide several source dirs for ncc-style checks

* Fixed include headers; added NCC to TF common

* Fixed NCC for frontends

* Fixed NCC for frontends

* Extra fixes

* Fixes; push --f

* Clang-format

* Apply comments

* Add an option to specify required clang-format version

* Update src/frontends/tensorflow/src/decoder_proto.cpp

* Update src/frontends/tensorflow/src/decoder_proto.cpp
Ilya Lavrenov 2023-03-13 18:54:00 +04:00 committed by GitHub
parent a84f87e9dc
commit f080a0d9cf
60 changed files with 451 additions and 448 deletions

View File

@ -3,23 +3,23 @@
#
if(ENABLE_CLANG_FORMAT)
set(clang_format_required_version 9)
set(CLANG_FORMAT_FILENAME clang-format-${clang_format_required_version} clang-format)
set(CLANG_FORMAT_REQUIRED_VERSION 9 CACHE STRING "Clang-format version to use")
set(CLANG_FORMAT_FILENAME clang-format-${CLANG_FORMAT_REQUIRED_VERSION} clang-format)
find_host_program(CLANG_FORMAT NAMES ${CLANG_FORMAT_FILENAME} PATHS ENV PATH)
if(CLANG_FORMAT)
execute_process(COMMAND ${CLANG_FORMAT} ${CMAKE_CURRENT_SOURCE_DIR} ARGS --version OUTPUT_VARIABLE CLANG_VERSION)
if(NOT CLANG_VERSION)
message(WARNING "Supported clang-format version is ${clang_format_required_version}!")
message(WARNING "Supported clang-format version is ${CLANG_FORMAT_REQUIRED_VERSION}!")
set(ENABLE_CLANG_FORMAT OFF)
else()
string(REGEX REPLACE "[^0-9]+([0-9]+)\\..*" "\\1" CLANG_FORMAT_MAJOR_VERSION ${CLANG_VERSION})
if(NOT CLANG_FORMAT_MAJOR_VERSION EQUAL clang_format_required_version)
if(NOT CLANG_FORMAT_MAJOR_VERSION EQUAL CLANG_FORMAT_REQUIRED_VERSION)
message(WARNING "Supported clang-format version is 9! Provided version ${CLANG_FORMAT_MAJOR_VERSION}")
set(ENABLE_CLANG_FORMAT OFF)
endif()
endif()
else()
message(WARNING "Supported clang-format-${clang_format_required_version} is not found!")
message(WARNING "Supported clang-format-${CLANG_FORMAT_REQUIRED_VERSION} is not found!")
set(ENABLE_CLANG_FORMAT OFF)
endif()
endif()
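
With the new cache option, the required clang-format major version can be overridden at configure time. A minimal sketch, assuming a hypothetical initial-cache script passed via cmake -C (the default remains 9, as set above):

# pin_clang_format.cmake -- hypothetical initial-cache script: cmake -C pin_clang_format.cmake <src>
# Pre-seeds the cache so the style check looks for clang-format-12 instead of clang-format-9.
set(CLANG_FORMAT_REQUIRED_VERSION 12 CACHE STRING "Clang-format version to use")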

View File

@ -33,7 +33,7 @@ if (ENABLE_UB_SANITIZER)
# https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
# Mute -fsanitize=function Indirect call of a function through a function pointer of the wrong type.
# Sample cases:
# call to function GetAPIVersion through pointer to incorrect function type 'void *(*)()'
# call to function get_api_version through pointer to incorrect function type 'void *(*)()'
# Mute -fsanitize=alignment Use of a misaligned pointer or creation of a misaligned reference. Also sanitizes assume_aligned-like attributes.
# Sample cases:
# VPU_FixedMaxHeapTest.DefaultConstructor test case load of misaligned address 0x62000000187f for type 'const DataType', which requires 4 byte alignment

View File

@ -15,8 +15,8 @@ set(OV_FRONTEND_MAP_DEFINITION " FrontendsStaticRegistry registry = {")
foreach(frontend IN LISTS FRONTEND_NAMES)
# common
set(_OV_FRONTEND_DATA_FUNC "GetFrontEndData${frontend}")
set(_OV_VERSION_FUNC "GetAPIVersion${frontend}")
set(_OV_FRONTEND_DATA_FUNC "get_front_end_data_${frontend}")
set(_OV_VERSION_FUNC "get_api_version_${frontend}")
# declarations
set(OV_FRONTEND_DECLARATIONS "${OV_FRONTEND_DECLARATIONS}

View File

@ -190,21 +190,8 @@ macro(ov_add_frontend)
if(NOT BUILD_SHARED_LIBS)
# override default function names
target_compile_definitions(${TARGET_NAME} PRIVATE
"-DGetFrontEndData=GetFrontEndData${OV_FRONTEND_NAME}"
"-DGetAPIVersion=GetAPIVersion${OV_FRONTEND_NAME}")
endif()
# enable LTO
set_target_properties(${TARGET_NAME} PROPERTIES
INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
if(OV_FRONTEND_SKIP_NCC_STYLE)
# frontend's CMakeLists.txt must define its own custom 'ov_ncc_naming_style' step
else()
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORY "${frontend_root_dir}/include"
ADDITIONAL_INCLUDE_DIRECTORIES
$<TARGET_PROPERTY:frontend_common::static,INTERFACE_INCLUDE_DIRECTORIES>)
"-Dget_front_end_data=get_front_end_data_${OV_FRONTEND_NAME}"
"-Dget_api_version=get_api_version_${OV_FRONTEND_NAME}")
endif()
target_include_directories(${TARGET_NAME}
@ -255,6 +242,21 @@ macro(ov_add_frontend)
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}
EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS} ${proto_files} ${flatbuffers_schema_files})
# enable LTO
set_target_properties(${TARGET_NAME} PROPERTIES
INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
if(OV_FRONTEND_SKIP_NCC_STYLE)
# frontend's CMakeLists.txt must define its own custom 'ov_ncc_naming_style' step
else()
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORIES "${frontend_root_dir}/include"
"${frontend_root_dir}/src"
ADDITIONAL_INCLUDE_DIRECTORIES
$<TARGET_PROPERTY:${TARGET_NAME},INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:${TARGET_NAME},INCLUDE_DIRECTORIES>)
endif()
add_dependencies(ov_frontends ${TARGET_NAME})
# must be called after all target_link_libraries
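
A frontend that needs custom checking can skip the built-in step and register its own. A minimal sketch, where the NAME/SKIP_NCC_STYLE arguments and the openvino_my_fe_frontend target name are assumed from the OV_FRONTEND_* variables above rather than confirmed by this diff:

# Hypothetical frontend that opts out of the default naming-style step
ov_add_frontend(NAME my_fe SKIP_NCC_STYLE)
# ...and provides its own, checking only the public headers (assumed target name)
ov_ncc_naming_style(FOR_TARGET openvino_my_fe_frontend
    SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include")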

View File

@ -10,12 +10,12 @@
namespace {
using GetFrontEndDataFunc = void*();
using GetAPIVersionFunc = ov::frontend::FrontEndVersion();
using get_front_end_data_func = void*();
using get_api_version_func = ov::frontend::FrontEndVersion();
struct Value {
GetFrontEndDataFunc* m_dataFunc;
GetAPIVersionFunc* m_versionFunc;
get_front_end_data_func* m_dataFunc;
get_api_version_func* m_versionFunc;
};
using FrontendsStaticRegistry = std::vector<Value>;

View File

@ -112,13 +112,13 @@ endif()
#
# ov_ncc_naming_style(FOR_TARGET target_name
# SOURCE_DIRECTORY dir
# SOURCE_DIRECTORIES dir1 dir2 ...
# [STYLE_FILE style_file.style]
# [ADDITIONAL_INCLUDE_DIRECTORIES dir1 dir2 ..]
# [DEFINITIONS def1 def2 ..])
#
# FOR_TARGET - name of the target
# SOURCE_DIRECTORY - directory to check sources from
# SOURCE_DIRECTORIES - directories to check sources from
# STYLE_FILE - path to the specific style file
# ADDITIONAL_INCLUDE_DIRECTORIES - additional include directories used in checked headers
# DEFINITIONS - additional definitions passed to preprocessor stage
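
With the extended signature, a typical multi-directory call might look like the following sketch (target and paths are illustrative; it mirrors the frontend usage shown earlier in this commit):

ov_ncc_naming_style(FOR_TARGET my_target
    SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include"
                       "${CMAKE_CURRENT_SOURCE_DIR}/src"
    ADDITIONAL_INCLUDE_DIRECTORIES
        $<TARGET_PROPERTY:my_target,INTERFACE_INCLUDE_DIRECTORIES>)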
@ -129,9 +129,9 @@ function(ov_ncc_naming_style)
endif()
cmake_parse_arguments(NCC_STYLE "FAIL"
"FOR_TARGET;SOURCE_DIRECTORY;STYLE_FILE" "ADDITIONAL_INCLUDE_DIRECTORIES;DEFINITIONS" ${ARGN})
"FOR_TARGET;STYLE_FILE" "SOURCE_DIRECTORIES;ADDITIONAL_INCLUDE_DIRECTORIES;DEFINITIONS" ${ARGN})
foreach(var FOR_TARGET SOURCE_DIRECTORY)
foreach(var FOR_TARGET SOURCE_DIRECTORIES)
if(NOT DEFINED NCC_STYLE_${var})
message(FATAL_ERROR "${var} is not defined in ov_ncc_naming_style function")
endif()
@ -141,18 +141,18 @@ function(ov_ncc_naming_style)
set(NCC_STYLE_STYLE_FILE ${ncc_style_dir}/openvino.style)
endif()
file(GLOB_RECURSE sources
RELATIVE "${NCC_STYLE_SOURCE_DIRECTORY}"
"${NCC_STYLE_SOURCE_DIRECTORY}/*.hpp"
"${NCC_STYLE_SOURCE_DIRECTORY}/*.cpp")
foreach(source_dir IN LISTS NCC_STYLE_SOURCE_DIRECTORIES)
file(GLOB_RECURSE local_sources "${source_dir}/*.hpp" "${source_dir}/*.cpp")
list(APPEND sources ${local_sources})
endforeach()
list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES "${NCC_STYLE_SOURCE_DIRECTORY}")
# without it, sources with the same name from different directories would map to the same .ncc_style target
file(RELATIVE_PATH source_dir_rel ${CMAKE_SOURCE_DIR} ${NCC_STYLE_SOURCE_DIRECTORY})
list(APPEND NCC_STYLE_ADDITIONAL_INCLUDE_DIRECTORIES ${NCC_STYLE_SOURCE_DIRECTORIES})
foreach(source IN LISTS sources)
set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source}.ncc_style")
set(full_source_path "${NCC_STYLE_SOURCE_DIRECTORY}/${source}")
foreach(source_file IN LISTS sources)
get_filename_component(source_dir "${source_file}" DIRECTORY)
file(RELATIVE_PATH source_dir_rel "${CMAKE_SOURCE_DIR}" "${source_dir}")
get_filename_component(source_name "${source_file}" NAME)
set(output_file "${ncc_style_bin_dir}/${source_dir_rel}/${source_name}.ncc_style")
add_custom_command(
OUTPUT
@ -161,7 +161,7 @@ function(ov_ncc_naming_style)
"${CMAKE_COMMAND}"
-D "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}"
-D "NCC_PY_SCRIPT=${ncc_script_py}"
-D "INPUT_FILE=${full_source_path}"
-D "INPUT_FILE=${source_file}"
-D "OUTPUT_FILE=${output_file}"
-D "DEFINITIONS=${NCC_STYLE_DEFINITIONS}"
-D "CLANG_LIB_PATH=${libclang_location}"
@ -170,12 +170,12 @@ function(ov_ncc_naming_style)
-D "EXPECTED_FAIL=${NCC_STYLE_FAIL}"
-P "${ncc_style_dir}/ncc_run.cmake"
DEPENDS
"${full_source_path}"
"${source_file}"
"${ncc_style_dir}/openvino.style"
"${ncc_script_py}"
"${ncc_style_dir}/ncc_run.cmake"
COMMENT
"[ncc naming style] ${source}"
"[ncc naming style] ${source_dir_rel}/${source_name}"
VERBATIM)
list(APPEND output_files ${output_file})
endforeach()
@ -191,6 +191,6 @@ endfunction()
if(TARGET ncc_all)
ov_ncc_naming_style(FOR_TARGET ncc_all
SOURCE_DIRECTORY "${ncc_style_dir}/self_check"
SOURCE_DIRECTORIES "${ncc_style_dir}/self_check"
FAIL)
endif()

View File

@ -1,7 +1,7 @@
# custom OpenVINO values
CppMethod: '^(operator\W+|[a-z_\d]+|signaling_NaN|quiet_NaN)$'
ClassName: '^([A-Z][\w]+|b?float16|numeric_limits|ngraph_error|stopwatch|unsupported_op)$'
StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair)$'
StructName: '^([A-Z][\w]+|element_type_traits|hash|oi_pair|stat)$'
FunctionName: '^(operator\W+|[a-z_\d]+)|PrintTo$'
Namespace: '^([a-z\d_]*|InferenceEngine)$'
NamespaceAlias: '^([a-z\d_]+|InferenceEngine)$'
@ -27,7 +27,7 @@ CxxDynamicCastExpression: '^.*$'
# not needed values
ClassTemplatePartialSpecialization: '^.*$'
ConversionFunction: '^.*$'
UsingDirective: 'XXXX'
UsingDirective: '^.*$'
ClassAccessSpecifier: '^.*$' # looks like can be fixed
TypeReference: '^.*$' # looks like can be fixed
CxxBaseSpecifier: '^.*$' # looks like can be fixed

View File

@ -83,7 +83,7 @@ if(OpenCV_FOUND)
endif()
# ov_ncc_naming_style(FOR_TARGET "${TARGET_NAME}"
# SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
# SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}"
# ADDITIONAL_INCLUDE_DIRECTORIES
# $<TARGET_PROPERTY:openvino::runtime,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@ -236,7 +236,7 @@ macro(ie_add_sample)
endif()
if(COMMAND ov_ncc_naming_style AND NOT c_sample)
ov_ncc_naming_style(FOR_TARGET "${IE_SAMPLE_NAME}"
SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}")
endif()
endmacro()

View File

@ -428,14 +428,14 @@ void FrontEndMockPy::clear_stat() {
} // namespace frontend
} // namespace ov
MOCK_C_API ov::frontend::FrontEndVersion GetAPIVersion();
MOCK_C_API void* GetFrontEndData();
MOCK_C_API ov::frontend::FrontEndVersion get_api_version();
MOCK_C_API void* get_front_end_data();
MOCK_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
MOCK_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
MOCK_C_API void* GetFrontEndData() {
MOCK_C_API void* get_front_end_data() {
ov::frontend::FrontEndPluginInfo* res = new ov::frontend::FrontEndPluginInfo();
res->m_name = "mock_py";
res->m_creator = []() {

View File

@ -43,6 +43,6 @@ ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE})
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORY ${UTIL_INCLUDE_DIR})
SOURCE_DIRECTORIES ${UTIL_INCLUDE_DIR})
openvino_developer_export_targets(COMPONENT core TARGETS ${TARGET_NAME})

View File

@ -87,7 +87,7 @@ target_link_libraries(ngraph_obj PRIVATE ngraph::builder ngraph::reference openv
ie_mark_target_as_cc(ngraph_obj)
ov_ncc_naming_style(FOR_TARGET ngraph_obj
SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include")
SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include")
add_clang_format_target(ngraph_clang FOR_TARGETS ngraph_obj)

View File

@ -193,14 +193,14 @@ public:
}
};
MOCK_C_API FrontEndVersion GetAPIVersion();
MOCK_C_API void* GetFrontEndData();
MOCK_C_API FrontEndVersion get_api_version();
MOCK_C_API void* get_front_end_data();
MOCK_C_API FrontEndVersion GetAPIVersion() {
MOCK_C_API FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
MOCK_C_API void* GetFrontEndData() {
MOCK_C_API void* get_front_end_data() {
auto* res = new FrontEndPluginInfo();
res->m_name = "mock1";
res->m_creator = []() {

View File

@ -57,7 +57,7 @@ target_compile_definitions(${TARGET_NAME}_obj PRIVATE
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}_obj)
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_obj
SOURCE_DIRECTORY "${FRONTEND_INCLUDE_DIR}"
SOURCE_DIRECTORIES "${FRONTEND_INCLUDE_DIR}"
ADDITIONAL_INCLUDE_DIRECTORIES
$<TARGET_PROPERTY:ngraph,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@ -97,13 +97,13 @@ FRONTEND_API FrontEnd::Ptr FrontEndManager::load_by_model(const std::vector<ov::
// --------- Plugin exporting information --------------
/// \brief Each frontend plugin is responsible to export GetAPIVersion function returning
/// \brief Each frontend plugin is responsible to export get_api_version function returning
/// version of frontend API used for this plugin
/// If version is not matched with OV_FRONTEND_API_VERSION - plugin will not be loaded by
/// FrontEndManager
using FrontEndVersion = uint64_t;
/// \brief Each frontend plugin is responsible to export GetFrontEndData function returning
/// \brief Each frontend plugin is responsible to export get_front_end_data function returning
/// heap-allocated pointer to this structure. Will be used by FrontEndManager during loading
/// of plugins
struct FrontEndPluginInfo {

View File

@ -147,7 +147,7 @@ bool PluginInfo::load_internal() {
return false;
}
auto info_addr = reinterpret_cast<void* (*)()>(ov::util::get_symbol(so, "GetAPIVersion"));
auto info_addr = reinterpret_cast<void* (*)()>(ov::util::get_symbol(so, "get_api_version"));
if (!info_addr) {
OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have API version" << std::endl;
return false;
@ -161,7 +161,7 @@ bool PluginInfo::load_internal() {
return false;
}
auto creator_addr = reinterpret_cast<void* (*)()>(ov::util::get_symbol(so, "GetFrontEndData"));
auto creator_addr = reinterpret_cast<void* (*)()>(ov::util::get_symbol(so, "get_front_end_data"));
if (!creator_addr) {
OPENVINO_DEBUG << "Loaded FrontEnd [" << m_file_path << "] doesn't have Frontend Data" << std::endl;
return false;

View File

@ -23,8 +23,8 @@ namespace frontend {
namespace ir {
namespace {
inline size_t GetIRVersion(pugi::xml_node& root) {
return XMLParseUtils::GetUIntAttr(root, "version", 0);
inline size_t get_ir_version(pugi::xml_node& root) {
return pugixml::utils::GetUIntAttr(root, "version", 0);
}
/**
@ -32,7 +32,7 @@ inline size_t GetIRVersion(pugi::xml_node& root) {
* @param model Models stream
* @return IR version, 0 if model does not represent IR
*/
size_t GetIRVersion(std::istream& model) {
size_t get_ir_version(std::istream& model) {
std::array<char, 512> header{};
model.seekg(0, model.beg);
@ -51,7 +51,7 @@ size_t GetIRVersion(std::istream& model) {
std::transform(node_name.begin(), node_name.end(), node_name.begin(), ::tolower);
if (node_name == "net") {
return GetIRVersion(root);
return get_ir_version(root);
}
}
@ -89,9 +89,9 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
size_t version;
if (provided_model_stream) {
version = GetIRVersion(*provided_model_stream);
version = get_ir_version(*provided_model_stream);
} else if (local_model_stream.is_open()) {
version = GetIRVersion(local_model_stream);
version = get_ir_version(local_model_stream);
local_model_stream.close();
} else {
return false;
@ -193,7 +193,7 @@ InputModel::Ptr FrontEnd::load_impl(const std::vector<ov::Any>& variants) const
#else
weights_path += ".bin";
#endif
if (!FileUtils::fileExist(weights_path)) {
if (!ov::util::file_exists(weights_path)) {
weights_path.clear();
}
}
@ -238,11 +238,11 @@ std::string FrontEnd::get_name() const {
} // namespace frontend
} // namespace ov
IR_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
IR_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
IR_C_API void* GetFrontEndData() {
IR_C_API void* get_front_end_data() {
frontend::FrontEndPluginInfo* res = new frontend::FrontEndPluginInfo();
res->m_name = "ir";
res->m_creator = []() {

View File

@ -18,7 +18,7 @@ using namespace ngraph;
using namespace InferenceEngine;
namespace {
void ParsePreProcess(pugi::xml_node& root,
void parse_pre_process(pugi::xml_node& root,
std::shared_ptr<ngraph::runtime::AlignedBuffer> weights,
std::shared_ptr<Function> f) {
/* Preprocessing block can have two preprocessing types:
@ -46,7 +46,7 @@ void ParsePreProcess(pugi::xml_node& root,
std::string inputName;
std::shared_ptr<Node> input_node;
inputName = XMLParseUtils::GetStrAttr(ppNode, "reference-layer-name", "");
inputName = pugixml::utils::GetStrAttr(ppNode, "reference-layer-name", "");
inputName = trim(inputName);
if (inputName.empty()) {
@ -110,7 +110,7 @@ void ParsePreProcess(pugi::xml_node& root,
auto input_type = input_node->get_output_element_type(0);
FOREACH_CHILD (chan, ppNode, "channel") {
auto chanNo = XMLParseUtils::GetUInt64Attr(chan, "id", next_channel_id++);
auto chanNo = pugixml::utils::GetUInt64Attr(chan, "id", next_channel_id++);
auto meanNode = chan.child("mean");
if (!meanNode.empty()) {
@ -118,11 +118,11 @@ void ParsePreProcess(pugi::xml_node& root,
IE_THROW() << "mean should have at least one of the following attribute: value, size";
}
if (meanNode.attribute("value")) {
mean_scalar_values.insert({chanNo, XMLParseUtils::GetFloatAttr(meanNode, "value")});
mean_scalar_values.insert({chanNo, pugixml::utils::GetFloatAttr(meanNode, "value")});
}
if (meanNode.attribute("size") && meanNode.attribute("offset")) {
auto const_size = XMLParseUtils::GetUInt64Attr(meanNode, "size");
auto const_offset = XMLParseUtils::GetUInt64Attr(meanNode, "offset");
auto const_size = pugixml::utils::GetUInt64Attr(meanNode, "size");
auto const_offset = pugixml::utils::GetUInt64Attr(meanNode, "offset");
if (shape_size(mean_shape) * input_type.size() != const_size) {
IE_THROW() << "mean blob size mismatch expected input, got: " << const_size << " expecting "
<< mean_shape << " x " << input_type.size();
@ -229,12 +229,12 @@ std::shared_ptr<Function> InputModel::InputModelIRImpl::convert() {
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>> variables;
// Load default opsets
size_t version = XMLParseUtils::GetUIntAttr(m_root, "version", 0);
size_t version = pugixml::utils::GetUIntAttr(m_root, "version", 0);
ov::XmlDeserializer visitor(m_root, m_weights, m_opsets, m_extensions, variables, version);
std::shared_ptr<ngraph::Function> function;
visitor.on_attribute("net", function);
function->get_rt_info()["version"] = int64_t(version);
ParsePreProcess(m_root, m_weights, function);
parse_pre_process(m_root, m_weights, function);
return function;
}

View File

@ -28,20 +28,20 @@ XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& nod
auto extend_io_map = io_map;
FOREACH_CHILD (layer, body_node.child("layers"), "layer") {
auto type = XMLParseUtils::GetStrAttr(layer, "type");
auto type = pugixml::utils::GetStrAttr(layer, "type");
if (type == "Parameter") {
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
auto id = pugixml::utils::GetUIntAttr(layer, "id");
extend_io_map.inputs.insert({id, -1}); // try add as unconnected
} else if (type == "Result") {
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
auto id = pugixml::utils::GetUIntAttr(layer, "id");
extend_io_map.outputs.insert({id, -1}); // try add as unconnected
}
}
return extend_io_map;
}
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> XmlDeserializer::parseInputDescription(
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> XmlDeserializer::parse_input_description(
const pugi::xml_node& node,
const std::string& body_name,
const std::string& port_map_name) {
@ -53,23 +53,23 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> Xml
// Parse PortMap: external_port_id for inputs does not always appear in consecutive order
std::map<uint64_t, pugi::xml_node> input_map;
FOREACH_CHILD (input, node.child(port_map_name.c_str()), "input") {
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
int64_t ext_port_id = pugixml::utils::GetInt64Attr(input, "external_port_id");
input_map.emplace(ext_port_id, input);
}
for (const auto& input : input_map) {
auto& xml_input = input.second;
auto axis_attr = xml_input.attribute("axis");
int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
int64_t ti_input_index = pugixml::utils::GetInt64Attr(xml_input, "external_port_id");
size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_input, "internal_layer_id");
// if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
if (!axis_attr.empty()) {
size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1);
size_t axis = pugixml::utils::GetUIntAttr(xml_input, "axis");
int64_t start = pugixml::utils::GetInt64Attr(xml_input, "start", 0);
int64_t stride = pugixml::utils::GetInt64Attr(xml_input, "stride", 1);
int64_t end = pugixml::utils::GetInt64Attr(xml_input, "end", -1);
int64_t part_size = pugixml::utils::GetInt64Attr(xml_input, "part_size", 1);
const auto input_index = up_io_map.inputs.at(body_parameter_index);
@ -84,10 +84,10 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> Xml
// otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
bool is_back_edge_exist = false;
FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") {
size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer");
size_t to_layer = pugixml::utils::GetUIntAttr(xml_edge, "to-layer");
if (to_layer == body_parameter_index) {
size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer");
size_t from_layer = pugixml::utils::GetUIntAttr(xml_edge, "from-layer");
const auto input_index = up_io_map.inputs.at(body_parameter_index);
const auto output_index = up_io_map.outputs.at(from_layer);
@ -117,7 +117,7 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> Xml
}
std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::OutputDescription>>
XmlDeserializer::parseOutputDescription(const pugi::xml_node& node,
XmlDeserializer::parse_output_description(const pugi::xml_node& node,
const std::string& body_name,
const std::string& port_map_name) {
std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::OutputDescription>> outputs;
@ -127,7 +127,7 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node,
// Parse PortMap: outputs
std::map<int64_t, pugi::xml_node> output_map;
FOREACH_CHILD (output, node.child(port_map_name.c_str()), "output") {
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
int64_t ext_port_id = pugixml::utils::GetInt64Attr(output, "external_port_id");
output_map.emplace(ext_port_id, output);
}
@ -135,19 +135,19 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node,
for (const auto& output : output_map) {
auto& xml_output = output.second;
auto axis_attr = xml_output.attribute("axis");
size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
size_t body_result_index = pugixml::utils::GetUIntAttr(xml_output, "internal_layer_id");
// if external_port_id < 0 it means that this body result isn't connected to the Loop output
// and is used only for internal needs. For TensorIterator external_port_id is always > 0.
if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) {
if (pugixml::utils::GetInt64Attr(xml_output, "external_port_id") >= 0) {
// if axis is set, then concatenation is enabled. Create
// ngraph::TensorIterator::ConcatOutput.
if (!axis_attr.empty()) {
int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis");
int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0);
int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1);
int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1);
int64_t axis = pugixml::utils::GetInt64Attr(xml_output, "axis");
int64_t start = pugixml::utils::GetInt64Attr(xml_output, "start", 0);
int64_t stride = pugixml::utils::GetInt64Attr(xml_output, "stride", 1);
int64_t end = pugixml::utils::GetInt64Attr(xml_output, "end", -1);
int64_t part_size = pugixml::utils::GetInt64Attr(xml_output, "part_size", 1);
const auto output_index = up_io_map.outputs.at(body_result_index);
@ -174,7 +174,7 @@ XmlDeserializer::parseOutputDescription(const pugi::xml_node& node,
return outputs;
}
ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) {
ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parse_purpose_attribute(const pugi::xml_node& node) {
ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1};
auto body_node = node.child("body");
const auto up_io_map = updated_io_map(node, body_node);
@ -186,19 +186,19 @@ ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(co
// order
std::map<uint64_t, pugi::xml_node> input_map;
FOREACH_CHILD (input, node.child("port_map"), "input") {
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
int64_t ext_port_id = pugixml::utils::GetInt64Attr(input, "external_port_id");
input_map.emplace(ext_port_id, input);
}
std::map<int64_t, pugi::xml_node> output_map;
FOREACH_CHILD (output, node.child("port_map"), "output") {
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
int64_t ext_port_id = pugixml::utils::GetInt64Attr(output, "external_port_id");
output_map.emplace(ext_port_id, output);
}
for (const auto& input : input_map) {
auto& xml_input = input.second;
auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", "");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
auto purpose = pugixml::utils::GetStrAttr(xml_input, "purpose", "");
size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_input, "internal_layer_id");
if (purpose == "current_iteration") {
result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index);
}
@ -206,8 +206,8 @@ ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(co
for (const auto& output : output_map) {
auto& xml_output = output.second;
auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", "");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
auto purpose = pugixml::utils::GetStrAttr(xml_output, "purpose", "");
size_t body_parameter_index = pugixml::utils::GetUIntAttr(xml_output, "internal_layer_id");
if (purpose == "execution_condition") {
result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index);
}
@ -239,13 +239,13 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<
}
if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::InputDescription>>>>(&adapter)) {
a->set(parseInputDescription(m_node, body_name, port_map_name));
a->set(parse_input_description(m_node, body_name, port_map_name));
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
std::vector<std::shared_ptr<ngraph::op::util::MultiSubGraphOp::OutputDescription>>>>(&adapter)) {
a->set(parseOutputDescription(m_node, body_name, port_map_name));
a->set(parse_output_description(m_node, body_name, port_map_name));
} else if (auto a =
ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>>(&adapter)) {
a->set(parsePurposeAttribute(m_node));
a->set(parse_purpose_attribute(m_node));
}
}
@ -318,7 +318,7 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<
&adapter)) {
std::string value;
pugi::xml_node dn = m_node.child("data");
auto type = XMLParseUtils::GetStrAttr(m_node, "type");
auto type = pugixml::utils::GetStrAttr(m_node, "type");
if (dn.empty())
IE_THROW() << "No attrtibutes defined for " << type << " op!";
@ -332,8 +332,8 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<
std::vector<int64_t> shape;
std::string el_type_str;
size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset");
size_t size = XMLParseUtils::GetUInt64Attr(dn, "size");
size_t offset = pugixml::utils::GetUInt64Attr(dn, "offset");
size_t size = pugixml::utils::GetUInt64Attr(dn, "size");
if (!getStrAttribute(dn, "element_type", el_type_str))
return;
if (!getParameters<int64_t>(dn, "shape", shape))
@ -357,8 +357,8 @@ void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<
a->set(buffer);
}
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::FrameworkNodeAttrs>>(&adapter)) {
const auto& type = XMLParseUtils::GetStrAttr(m_node, "type");
const auto& version = XMLParseUtils::GetStrAttr(m_node, "version");
const auto& type = pugixml::utils::GetStrAttr(m_node, "type");
const auto& version = pugixml::utils::GetStrAttr(m_node, "version");
ngraph::op::FrameworkNodeAttrs node_attrs;
node_attrs.set_opset_name(version);
@ -414,25 +414,25 @@ std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(
ngraph::SinkVector sinks;
};
struct edge {
struct Edge {
size_t fromLayerId, fromPortId, toPortId;
};
struct node_params {
struct NodeParams {
pugi::xml_node xml;
GenericLayerParams params;
};
std::map<size_t /*layer-id*/, node_params> params;
std::map<size_t /*layer-id*/, NodeParams> params;
std::vector<size_t /*layer-id*/> outputs;
std::unordered_set<std::string> opName;
std::vector<size_t> order;
std::set<size_t> dfs_used_nodes;
std::map<size_t /*to-layer-id*/, std::vector<edge>> edges;
std::map<size_t /*to-layer-id*/, std::vector<Edge>> edges;
// Read all layers and store their parameters in params map
FOREACH_CHILD (node, root.child("layers"), "layer") {
auto node_param = parseGenericParams(node);
auto node_param = parse_generic_params(node);
if (opName.find(node_param.name) != opName.end() && node_param.type != "Result")
IE_THROW() << "Invalid IR! " << node_param.name << " name is not unique!";
opName.insert(node_param.name);
@ -451,10 +451,10 @@ std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(
// Read all edges and store them for further usage
FOREACH_CHILD (_ec, root.child("edges"), "edge") {
size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer");
size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port");
size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer");
size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port");
size_t fromLayer = pugixml::utils::GetUIntAttr(_ec, "from-layer");
size_t fromPort = pugixml::utils::GetUIntAttr(_ec, "from-port");
size_t toLayer = pugixml::utils::GetUIntAttr(_ec, "to-layer");
size_t toPort = pugixml::utils::GetUIntAttr(_ec, "to-port");
edges[toLayer].push_back({fromLayer, fromPort, toPort});
}
@ -489,14 +489,14 @@ std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(
IE_THROW() << "Attempt to access node " << e.fromLayerId << " that not in graph.";
}
auto& p_output = params[e.fromLayerId].params;
size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId);
size_t const realInputPortId = p.params.get_real_input_port_id(e.toPortId);
if (realInputPortId >= inputs.size())
IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId
<< " is inconsistent!";
inputs[realInputPortId] = input_node->output(p_output.getRealOutputPortId(e.fromPortId));
inputs[realInputPortId] = input_node->output(p_output.get_real_output_port_id(e.fromPortId));
}
auto node = createNode(inputs, p.xml, weights, p.params);
auto node = create_node(inputs, p.xml, weights, p.params);
id_to_node[layer_id] = node;
// Check that output shape after OpenVINO node validation the same as in IR
@ -537,7 +537,7 @@ std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(
auto function = std::make_shared<ngraph::Function>(func_nodes.results,
func_nodes.sinks,
func_nodes.parameters,
XMLParseUtils::GetStrAttr(root, "name", ""));
pugixml::utils::GetStrAttr(root, "name", ""));
for (const auto& sink : func_nodes.sinks) {
if (const auto& assign = std::dynamic_pointer_cast<ngraph::op::AssignBase>(sink)) {
assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id()));
@ -582,9 +582,9 @@ private:
ov::Any parse_value(const pugi::xml_node& node) const {
if (has_attr(node)) {
return XMLParseUtils::GetStrAttr(node, "value");
return pugixml::utils::GetStrAttr(node, "value");
} else if (std::string(node.name()) == "unset" && has_attr(node, "unset_cli_parameters")) {
return XMLParseUtils::GetStrAttr(node, "unset_cli_parameters");
return pugixml::utils::GetStrAttr(node, "unset_cli_parameters");
} else {
return parse_node(node);
}
@ -636,7 +636,7 @@ void XmlDeserializer::read_meta_data(const std::shared_ptr<ov::Model>& model, co
if (data.empty())
continue;
if (!data.attribute("value").empty()) {
rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value");
rt_info[data.name()] = pugixml::utils::GetStrAttr(data, "value");
} else {
// Use meta data for set of parameters
std::shared_ptr<ov::Meta> meta = std::make_shared<MetaDataParser>(data.name(), data);
@ -660,7 +660,7 @@ void XmlDeserializer::read_legacy_meta_data(const std::shared_ptr<ov::Model>& mo
std::shared_ptr<ov::Meta> meta = std::make_shared<MetaDataParser>("cli_parameters", data);
rt_info["conversion_parameters"] = meta;
} else if (!data.attribute("value").empty()) {
rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value");
rt_info[data.name()] = pugixml::utils::GetStrAttr(data, "value");
} else {
OPENVINO_THROW("Unsupported legacy argument: ", data.name());
}
@ -675,13 +675,13 @@ void XmlDeserializer::read_legacy_meta_data(const std::shared_ptr<ov::Model>& mo
read_meta(model, it, root_section.child(it.c_str()));
}
GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
GenericLayerParams XmlDeserializer::parse_generic_params(const pugi::xml_node& node) {
const auto parsePort = [](const pugi::xml_node& parentNode,
const GenericLayerParams& params,
bool input) -> GenericLayerParams::LayerPortData {
GenericLayerParams::LayerPortData port;
port.portId = XMLParseUtils::GetUIntAttr(parentNode, "id");
port.portId = pugixml::utils::GetUIntAttr(parentNode, "id");
FOREACH_CHILD (node, parentNode, "dim") {
int64_t dim = 0;
@ -697,7 +697,7 @@ GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& nod
ngraph::element::Type type(ngraph::element::Type_t::undefined);
// Input port has no precision
if (!input) {
const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision");
const std::string& preStr = pugixml::utils::GetStrAttr(parentNode, "precision");
type = InferenceEngine::details::convertPrecision(preStr);
}
port.precision = type;
@ -721,12 +721,12 @@ GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& nod
};
GenericLayerParams params;
params.layerId = XMLParseUtils::GetUIntAttr(node, "id");
params.version = XMLParseUtils::GetStrAttr(node, "version");
params.layerId = pugixml::utils::GetUIntAttr(node, "id");
params.version = pugixml::utils::GetStrAttr(node, "version");
params.type = XMLParseUtils::GetStrAttr(node, "type");
params.type = pugixml::utils::GetStrAttr(node, "type");
params.name = XMLParseUtils::GetStrAttr(node, "name");
params.name = pugixml::utils::GetStrAttr(node, "name");
auto outNode = node.child("output");
if (!outNode.empty()) {
@ -753,7 +753,7 @@ static const std::string& translate_type_name(const std::string& name) {
return name;
}
std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
std::shared_ptr<ngraph::Node> XmlDeserializer::create_node(
const std::vector<ngraph::Output<ngraph::Node>>& inputs,
const pugi::xml_node& node,
const std::shared_ptr<ngraph::runtime::AlignedBuffer>& weights,

View File

@ -35,7 +35,7 @@ struct GenericLayerParams {
std::vector<LayerPortData> inputPorts;
std::vector<LayerPortData> outputPorts;
size_t getRealInputPortId(size_t id) const {
size_t get_real_input_port_id(size_t id) const {
size_t real_id = 0;
for (auto& it : inputPorts) {
if (it.portId == id) {
@ -46,7 +46,7 @@ struct GenericLayerParams {
IE_THROW() << "Can not find input port with id " << id << " in layer " << name;
}
size_t getRealOutputPortId(size_t id) const {
size_t get_real_output_port_id(size_t id) const {
size_t real_id = 0;
for (auto& it : outputPorts) {
if (it.portId == id) {
@ -152,12 +152,14 @@ private:
/// Shall be used only for ops which have port_map attribute.
/// \param node xml op representation
std::vector<std::shared_ptr<ov::op::util::SubGraphOp::InputDescription>>
parseInputDescription(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name);
parse_input_description(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name);
/// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs.
/// Shall be used only for ops which have port_map attribute.
/// \param node xml op representation
std::vector<std::shared_ptr<ov::op::util::SubGraphOp::OutputDescription>>
parseOutputDescription(const pugi::xml_node& node, const std::string& body_name, const std::string& port_map_name);
std::vector<std::shared_ptr<ov::op::util::SubGraphOp::OutputDescription>> parse_output_description(
const pugi::xml_node& node,
const std::string& body_name,
const std::string& port_map_name);
// TODO consider to call only once per layer/TI-Loop node
IoMap updated_io_map(const pugi::xml_node& node, const pugi::xml_node& body_node);
@ -171,11 +173,11 @@ private:
/// \brief Traverses xml node representation in order to get the purpose attribute of
/// inputs/outputs in the body of Loop op. \param node xml node representation \return struct
/// with value of purpose attribute
ov::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node);
ov::op::v5::Loop::SpecialBodyPorts parse_purpose_attribute(const pugi::xml_node& node);
GenericLayerParams parseGenericParams(const pugi::xml_node& node);
GenericLayerParams parse_generic_params(const pugi::xml_node& node);
std::shared_ptr<ov::Node> createNode(const ov::OutputVector& inputs,
std::shared_ptr<ov::Node> create_node(const ov::OutputVector& inputs,
const pugi::xml_node& node,
const std::shared_ptr<ngraph::runtime::AlignedBuffer>& weights,
const GenericLayerParams& params);

View File

@ -105,12 +105,12 @@ private:
m_size);
OPENVINO_ASSERT(m_data, "Can not create map view for ", path);
} else {
m_data = NULL;
m_data = nullptr;
}
}
private:
void* m_data = NULL;
void* m_data = nullptr;
size_t m_size = 0;
HandleHolder m_handle;
HandleHolder m_mapping;

View File

@ -19,7 +19,7 @@ set(ONNX_OPSET_VERSION 17 CACHE INTERNAL "Supported version of ONNX operator set
target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION})
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORY "${${TARGET_NAME}_INCLUDE_DIR}"
SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}"
DEFINITIONS
$<TARGET_PROPERTY:onnx,INTERFACE_COMPILE_DEFINITIONS>
ADDITIONAL_INCLUDE_DIRECTORIES

View File

@ -25,11 +25,11 @@
using namespace ov;
using namespace ov::frontend::onnx;
ONNX_FRONTEND_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
ONNX_FRONTEND_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
ONNX_FRONTEND_C_API void* GetFrontEndData() {
ONNX_FRONTEND_C_API void* get_front_end_data() {
ov::frontend::FrontEndPluginInfo* res = new ov::frontend::FrontEndPluginInfo();
res->m_name = "onnx";
res->m_creator = []() {

View File

@ -21,7 +21,8 @@ namespace paddle {
using namespace ::paddle::framework;
std::map<proto::VarType_Type, ov::element::Type> TYPE_MAP{
ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type) {
static const std::map<proto::VarType_Type, ov::element::Type> type_map{
{proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean},
{proto::VarType_Type::VarType_Type_INT16, ov::element::i16},
{proto::VarType_Type::VarType_Type_INT32, ov::element::i32},
@ -33,6 +34,11 @@ std::map<proto::VarType_Type, ov::element::Type> TYPE_MAP{
{proto::VarType_Type::VarType_Type_INT8, ov::element::i8},
{proto::VarType_Type::VarType_Type_BF16, ov::element::bf16}};
auto it = type_map.find(type);
OPENVINO_ASSERT(it != type_map.end(), "Cannot convert PDPD type to ov::element::Type");
return it->second;
}
ov::Any DecoderProto::get_attribute(const std::string& name) const {
auto attrs = decode_attribute_helper(name);
if (attrs.empty()) {
@ -71,12 +77,12 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
ov::Any DecoderProto::convert_attribute(const Any& data, const std::type_info& type_info) const {
if (data.is<int32_t>() && type_info == typeid(ov::element::Type)) {
return TYPE_MAP.at(static_cast<proto::VarType_Type>(data.as<int32_t>()));
return get_ov_type(static_cast<proto::VarType_Type>(data.as<int32_t>()));
} else if (data.is<std::vector<int32_t>>() && type_info == typeid(std::vector<ov::element::Type>)) {
const auto& casted = data.as<std::vector<int32_t>>();
std::vector<ov::element::Type> types(casted.size());
for (size_t i = 0; i < casted.size(); ++i) {
types[i] = TYPE_MAP.at(static_cast<proto::VarType_Type>(casted[i]));
types[i] = get_ov_type(static_cast<proto::VarType_Type>(casted[i]));
}
return types;
}

View File

@ -22,7 +22,8 @@
namespace ov {
namespace frontend {
namespace paddle {
extern std::map<::paddle::framework::proto::VarType_Type, ov::element::Type> TYPE_MAP;
ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type);
class DecoderProto : public paddle::DecoderBase {
public:

View File

@ -283,7 +283,7 @@ std::map<int32_t, std::shared_ptr<ov::Model>> FrontEnd::convert_each_node_recurs
const auto& var_name = port.arguments()[static_cast<int>(idx)];
ng_outputs[idx].get_tensor().set_names({var_name});
// if nodes_dict already has node mapped to this tensor name it
// usually means that it was overwritten using setTensorValue
// usually means that it was overwritten using set_tensor_value
nodes_dict[var_name] = ng_outputs[idx];
}
}
@ -534,11 +534,11 @@ void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
} // namespace frontend
} // namespace ov
PADDLE_C_API FrontEndVersion GetAPIVersion() {
PADDLE_C_API FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
PADDLE_C_API void* GetFrontEndData() {
PADDLE_C_API void* get_front_end_data() {
FrontEndPluginInfo* res = new FrontEndPluginInfo();
res->m_name = "paddle";
res->m_creator = []() {

View File

@ -36,17 +36,17 @@ public:
InputModelImpl(const std::vector<std::istream*>& streams,
const InputModel& input_model,
const std::shared_ptr<TelemetryExtension>& telemetry);
std::vector<Place::Ptr> getInputs() const;
std::vector<Place::Ptr> getOutputs() const;
Place::Ptr getPlaceByTensorName(const std::string& tensorName) const;
void overrideAllOutputs(const std::vector<Place::Ptr>& outputs);
void overrideAllInputs(const std::vector<Place::Ptr>& inputs);
void extractSubgraph(const std::vector<Place::Ptr>& inputs, const std::vector<Place::Ptr>& outputs);
void setDefaultShape(Place::Ptr place, const ov::Shape&);
void setPartialShape(Place::Ptr place, const ov::PartialShape&);
ov::PartialShape getPartialShape(Place::Ptr place) const;
void setElementType(Place::Ptr place, const ov::element::Type&);
void setTensorValue(Place::Ptr place, const void* value);
std::vector<Place::Ptr> get_inputs() const;
std::vector<Place::Ptr> get_outputs() const;
Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const;
void override_all_outputs(const std::vector<Place::Ptr>& outputs);
void override_all_inputs(const std::vector<Place::Ptr>& inputs);
void extract_subgraph(const std::vector<Place::Ptr>& inputs, const std::vector<Place::Ptr>& outputs);
void set_default_shape(Place::Ptr place, const ov::Shape&);
void set_partial_shape(Place::Ptr place, const ov::PartialShape&);
ov::PartialShape get_partial_shape(Place::Ptr place) const;
void set_element_type(Place::Ptr place, const ov::element::Type&);
void set_tensor_value(Place::Ptr place, const void* value);
std::vector<std::shared_ptr<OpPlace>> get_op_places(const int32_t blck_idx) const;
std::map<std::string, std::shared_ptr<TensorPlace>> get_var_places() const {
@ -57,10 +57,10 @@ public:
};
private:
void loadPlaces();
void load_places();
template <typename T>
void loadConsts(const std::basic_string<T>& folder_with_weights, std::istream* weight_stream);
void createTempConsts();
void create_temp_consts();
std::vector<std::shared_ptr<OpPlace>> determine_cut_nodes() const;
std::vector<std::vector<std::shared_ptr<OpPlace>>> m_op_places;
@ -77,7 +77,7 @@ private:
bool m_graph_changed = false;
};
void InputModel::InputModelImpl::loadPlaces() {
void InputModel::InputModelImpl::load_places() {
const int cnt_of_blocks = m_fw_ptr->blocks_size();
const auto& blocks = m_fw_ptr->blocks();
std::map<std::string, uint64_t> op_statistics;
@ -138,7 +138,7 @@ void InputModel::InputModelImpl::loadPlaces() {
const auto& tensor_desc = var_place->get_desc().type().lod_tensor().tensor();
const auto& dims = tensor_desc.dims();
var_place->set_element_type(TYPE_MAP[tensor_desc.data_type()]);
var_place->set_element_type(get_ov_type(tensor_desc.data_type()));
var_place->set_partial_shape(PartialShape(std::vector<Dimension>(dims.begin(), dims.end())));
m_inputs.push_back(var_place);
} else if (op.type() == "fetch") {
@ -226,7 +226,7 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelImpl::determine_cut_
std::vector<std::shared_ptr<OpPlace>> new_op_places;
new_op_places.reserve(m_op_places[0].size());
// Marking nodes from outputs to inputs/constants
for (const auto& output : getOutputs()) {
for (const auto& output : get_outputs()) {
if (!output->is_input()) {
auto paddle_output_op = std::dynamic_pointer_cast<OpPlace>(output->get_producing_operation());
FRONT_END_GENERAL_CHECK(paddle_output_op != nullptr, "Output doesn't have producing operation");
@ -298,7 +298,7 @@ void InputModel::InputModelImpl::loadConsts(const std::basic_string<T>& folder_w
new ::paddle::framework::proto::VarType_TensorDesc());
tensor_desc->ParseFromArray(buf.get(), size);
Shape shape(tensor_desc->dims().cbegin(), tensor_desc->dims().cend());
const auto& type = TYPE_MAP[tensor_desc->data_type()];
const auto& type = get_ov_type(tensor_desc->data_type());
const auto& data_length = shape_size(shape) * type.size();
std::vector<uint8_t> tensor_data(data_length);
@ -344,16 +344,16 @@ InputModel::InputModelImpl::InputModelImpl(const std::basic_string<T>& path,
FRONT_END_GENERAL_CHECK(
version >= 2000000 || version == 0,
"[Frontend]Only Support Paddle greater than 2.0.0, current version " + std::to_string(version));
loadPlaces();
load_places();
if (weights_stream && weights_stream.is_open()) {
loadConsts(std::basic_string<T>{}, &weights_stream);
} else {
loadConsts(path, nullptr);
}
createTempConsts();
create_temp_consts();
}
void InputModel::InputModelImpl::createTempConsts() {
void InputModel::InputModelImpl::create_temp_consts() {
for (const auto& item : m_var_places) {
const auto& var_place = item.second;
const auto& var_desc = var_place->get_desc();
@ -370,7 +370,7 @@ void InputModel::InputModelImpl::createTempConsts() {
// shape, we simply let the first dimension be 0.
if (var_desc.type().has_tensor_array()) {
const auto& tensor = var_desc.type().tensor_array().tensor();
const auto& type = TYPE_MAP[tensor.data_type()];
const auto& type = get_ov_type(tensor.data_type());
std::cout << "WARNING: The PaddlePaddle model has \"TENSOR_ARRAY\" variables, which is supported "
<< " under limited situations.\n";
@ -420,21 +420,21 @@ InputModel::InputModelImpl::InputModelImpl(const std::vector<std::istream*>& str
FRONT_END_GENERAL_CHECK(
version >= 2000000 || version == 0,
"[Frontend]Only Support Paddle greater than 2.0.0, current version " + std::to_string(version));
loadPlaces();
load_places();
if (streams.size() > 1)
loadConsts(std::string(), streams[1]);
createTempConsts();
create_temp_consts();
}
std::vector<Place::Ptr> InputModel::InputModelImpl::getInputs() const {
std::vector<Place::Ptr> InputModel::InputModelImpl::get_inputs() const {
return m_inputs;
}
std::vector<Place::Ptr> InputModel::InputModelImpl::getOutputs() const {
std::vector<Place::Ptr> InputModel::InputModelImpl::get_outputs() const {
return m_outputs;
}
Place::Ptr InputModel::InputModelImpl::getPlaceByTensorName(const std::string& tensorName) const {
Place::Ptr InputModel::InputModelImpl::get_place_by_tensor_name(const std::string& tensorName) const {
if (m_var_places.count(tensorName))
return m_var_places.at(tensorName);
return nullptr;
@ -454,7 +454,7 @@ std::shared_ptr<TensorPlace> castToTensorPlace(const Place::Ptr& place) {
} // namespace
void InputModel::InputModelImpl::overrideAllInputs(const std::vector<Place::Ptr>& inputs) {
void InputModel::InputModelImpl::override_all_inputs(const std::vector<Place::Ptr>& inputs) {
m_graph_changed = true;
m_inputs.clear();
for (const auto& inp : inputs) {
@ -462,7 +462,7 @@ void InputModel::InputModelImpl::overrideAllInputs(const std::vector<Place::Ptr>
}
}
void InputModel::InputModelImpl::overrideAllOutputs(const std::vector<Place::Ptr>& outputs) {
void InputModel::InputModelImpl::override_all_outputs(const std::vector<Place::Ptr>& outputs) {
m_graph_changed = true;
m_outputs.clear();
for (const auto& outp : outputs) {
@ -470,30 +470,30 @@ void InputModel::InputModelImpl::overrideAllOutputs(const std::vector<Place::Ptr
}
}
void InputModel::InputModelImpl::extractSubgraph(const std::vector<Place::Ptr>& inputs,
void InputModel::InputModelImpl::extract_subgraph(const std::vector<Place::Ptr>& inputs,
const std::vector<Place::Ptr>& outputs) {
m_graph_changed = true;
overrideAllInputs(inputs);
overrideAllOutputs(outputs);
override_all_inputs(inputs);
override_all_outputs(outputs);
}
void InputModel::InputModelImpl::setDefaultShape(Place::Ptr place, const ov::Shape& shape) {
FRONT_END_NOT_IMPLEMENTED("setDefaultShape");
void InputModel::InputModelImpl::set_default_shape(Place::Ptr place, const ov::Shape& shape) {
FRONT_END_NOT_IMPLEMENTED("set_default_shape");
}
void InputModel::InputModelImpl::setPartialShape(Place::Ptr place, const ov::PartialShape& p_shape) {
void InputModel::InputModelImpl::set_partial_shape(Place::Ptr place, const ov::PartialShape& p_shape) {
castToTensorPlace(place)->set_partial_shape(p_shape);
}
ov::PartialShape InputModel::InputModelImpl::getPartialShape(Place::Ptr place) const {
ov::PartialShape InputModel::InputModelImpl::get_partial_shape(Place::Ptr place) const {
return castToTensorPlace(place)->get_partial_shape();
}
void InputModel::InputModelImpl::setElementType(Place::Ptr place, const ov::element::Type& type) {
void InputModel::InputModelImpl::set_element_type(Place::Ptr place, const ov::element::Type& type) {
castToTensorPlace(place)->set_element_type(type);
}
void InputModel::InputModelImpl::setTensorValue(Place::Ptr place, const void* value) {
void InputModel::InputModelImpl::set_tensor_value(Place::Ptr place, const void* value) {
m_graph_changed = true;
auto tensor_place = castToTensorPlace(place);
auto p_shape = tensor_place->get_partial_shape();
@ -528,43 +528,43 @@ std::map<paddle::TensorName, Output<Node>> InputModel::get_tensor_values() const
}
std::vector<Place::Ptr> InputModel::get_inputs() const {
return _impl->getInputs();
return _impl->get_inputs();
}
std::vector<Place::Ptr> InputModel::get_outputs() const {
return _impl->getOutputs();
return _impl->get_outputs();
}
Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const {
return _impl->getPlaceByTensorName(tensorName);
return _impl->get_place_by_tensor_name(tensorName);
}
void InputModel::override_all_outputs(const std::vector<Place::Ptr>& outputs) {
_impl->overrideAllOutputs(outputs);
_impl->override_all_outputs(outputs);
}
void InputModel::override_all_inputs(const std::vector<Place::Ptr>& inputs) {
_impl->overrideAllInputs(inputs);
_impl->override_all_inputs(inputs);
}
void InputModel::extract_subgraph(const std::vector<Place::Ptr>& inputs, const std::vector<Place::Ptr>& outputs) {
_impl->extractSubgraph(inputs, outputs);
_impl->extract_subgraph(inputs, outputs);
}
void InputModel::set_partial_shape(const Place::Ptr& place, const ov::PartialShape& p_shape) {
_impl->setPartialShape(place, p_shape);
_impl->set_partial_shape(place, p_shape);
}
ov::PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const {
return _impl->getPartialShape(place);
return _impl->get_partial_shape(place);
}
void InputModel::set_element_type(const Place::Ptr& place, const ov::element::Type& type) {
_impl->setElementType(place, type);
_impl->set_element_type(place, type);
}
void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) {
_impl->setTensorValue(place, value);
_impl->set_tensor_value(place, value);
}
} // namespace paddle

View File

@ -213,7 +213,7 @@ TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model,
const auto& var_type = var_desc.type();
if (var_type.type() == ::paddle::framework::proto::VarType::LOD_TENSOR) {
const auto& tensor_desc = var_type.lod_tensor().tensor();
m_type = TYPE_MAP[tensor_desc.data_type()];
m_type = get_ov_type(tensor_desc.data_type());
m_pshape = PartialShape(std::vector<Dimension>(tensor_desc.dims().begin(), tensor_desc.dims().end()));
}
}

View File

@ -6,11 +6,11 @@
#include "openvino/frontend/pytorch/frontend.hpp"
#include "openvino/frontend/pytorch/visibility.hpp"
PYTORCH_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
PYTORCH_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
PYTORCH_C_API void* GetFrontEndData() {
PYTORCH_C_API void* get_front_end_data() {
auto res = new ov::frontend::FrontEndPluginInfo();
res->m_name = "pytorch";
res->m_creator = []() {

View File

@ -14,31 +14,10 @@ namespace ov {
namespace frontend {
namespace tensorflow {
namespace {
const std::map<::tensorflow::DataType, ov::element::Type>& TYPE_MAP() {
static const std::map<::tensorflow::DataType, ov::element::Type> type_map{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}};
return type_map;
}
} // namespace
size_t DecoderArgDef::get_input_size() const {
FRONT_END_GENERAL_CHECK(m_op_type == "input_arg" || m_op_type == "output_arg",
"[TensorFlow Frontend] Internal error: Incorrect use of DecoderArgDef class.");
if (m_op_type == "input_arg") {
return 0;
} else {
return 1;
}
return m_op_type == "input_arg" ? 0 : 1;
}
const std::string& DecoderArgDef::get_op_type() const {
@ -74,12 +53,7 @@ void DecoderArgDef::get_input_node(size_t input_port_idx,
ov::Any DecoderArgDef::get_attribute(const std::string& name) const {
FRONT_END_GENERAL_CHECK(name == "type",
"[TensorFlow Frontend] Internal error: DecoderArgDef supports only `type` attribute.");
if (TYPE_MAP().count(m_arg_def->type())) {
return TYPE_MAP().at(m_arg_def->type());
} else {
// for all unsupported types return dynamic type
return ov::element::dynamic;
}
return get_ov_type(m_arg_def->type());
}
} // namespace tensorflow

View File

@ -15,20 +15,6 @@ namespace frontend {
namespace tensorflow {
namespace {
const std::map<::tensorflow::DataType, ov::element::Type>& TYPE_MAP() {
static const std::map<::tensorflow::DataType, ov::element::Type> type_map{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}};
return type_map;
}
template <typename T>
void extract_tensor_content(const std::string& tensor_content, ov::Tensor* values) {
@ -96,6 +82,24 @@ void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_p
#endif
} // namespace
ov::element::Type get_ov_type(const ::tensorflow::DataType& type) {
static const std::map<::tensorflow::DataType, ov::element::Type> type_map{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}};
auto it = type_map.find(type);
// for all unsupported types return dynamic type
return it == type_map.end() ? ov::element::dynamic : it->second;
}
ov::Any DecoderProto::get_attribute(const std::string& name) const {
auto attrs = decode_attribute_helper(name);
if (attrs.empty()) {
@ -125,12 +129,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
}
case ::tensorflow::AttrValue::ValueCase::kType: {
if (TYPE_MAP().count(attrs[0].type())) {
return TYPE_MAP().at(attrs[0].type());
} else {
// for all unsupported types return dynamic type
return ov::element::dynamic;
}
return get_ov_type(attrs[0].type());
}
case ::tensorflow::AttrValue::ValueCase::kList: {
@ -169,7 +168,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
if (list.type_size()) {
std::vector<ov::element::Type> res;
for (int idx = 0; idx < list.type_size(); ++idx) {
res.emplace_back(TYPE_MAP().at(list.type(idx)));
res.emplace_back(get_ov_type(list.type(idx)));
}
return res;
}
@ -194,10 +193,10 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
}
FRONT_END_GENERAL_CHECK(pshape.is_static(), "Dynamic shapes are not supported for Tensor attribute.");
const auto& tf_type = tensor_proto.dtype();
auto ov_type = get_ov_type(tf_type);
FRONT_END_GENERAL_CHECK(
TYPE_MAP().count(tf_type),
ov_type.is_static(),
"Encountered unknown element type " + DataType_Name(tf_type) + " on an empty tensor_proto");
auto ov_type = TYPE_MAP().at(tf_type);
ov::Tensor res(ov_type, pshape.get_shape());
auto tensor_content = tensor_proto.tensor_content();
if (!tensor_content.empty() && tensor_proto.has_tensor_shape()) {

View File

@ -7,7 +7,9 @@
#include <string>
#include <vector>
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/tensorflow/decoder.hpp"
#include "types.pb.h"
namespace tensorflow {
class NodeDef;
@ -18,6 +20,8 @@ namespace ov {
namespace frontend {
namespace tensorflow {
ov::element::Type get_ov_type(const ::tensorflow::DataType& type);
void parse_producer_name(const std::string& producer_port_name,
std::string& producer_name,
size_t& producer_output_port_index,

View File

@ -56,18 +56,18 @@ public:
InputModelTFImpl(const GraphIterator::Ptr& graph_iterator,
const ov::frontend::InputModel& input_model,
const std::shared_ptr<TelemetryExtension>& telemetry);
std::vector<ov::frontend::Place::Ptr> getInputs() const;
std::vector<ov::frontend::Place::Ptr> getOutputs() const;
ov::frontend::Place::Ptr getPlaceByTensorName(const std::string& tensorName) const;
void overrideAllOutputs(const std::vector<ov::frontend::Place::Ptr>& outputs);
void overrideAllInputs(const std::vector<ov::frontend::Place::Ptr>& inputs);
void extractSubgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
std::vector<ov::frontend::Place::Ptr> get_inputs() const;
std::vector<ov::frontend::Place::Ptr> get_outputs() const;
ov::frontend::Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const;
void override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs);
void override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs);
void extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs);
void setPartialShape(ov::frontend::Place::Ptr place, const ov::PartialShape&);
ov::PartialShape getPartialShape(ov::frontend::Place::Ptr place) const;
void setElementType(ov::frontend::Place::Ptr place, const ov::element::Type&);
ov::element::Type getElementType(ov::frontend::Place::Ptr place) const;
void setTensorValue(ov::frontend::Place::Ptr place, const void* value);
void set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape&);
ov::PartialShape get_partial_shape(ov::frontend::Place::Ptr place) const;
void set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type&);
ov::element::Type get_element_type(ov::frontend::Place::Ptr place) const;
void set_tensor_value(ov::frontend::Place::Ptr place, const void* value);
std::vector<std::shared_ptr<OpPlace>> get_op_places() const;
std::map<std::string, std::shared_ptr<TensorPlace>> get_tensor_places() const {
@ -81,7 +81,7 @@ public:
std::vector<std::string> get_output_names() const;
private:
void loadPlaces();
void load_places();
std::vector<std::shared_ptr<OpPlace>> topologically_sort_op_nodes() const;
std::vector<std::shared_ptr<OpPlace>> m_op_places;
@ -103,7 +103,7 @@ private:
bool m_graph_changed = false;
};
void InputModel::InputModelTFImpl::loadPlaces() {
void InputModel::InputModelTFImpl::load_places() {
std::set<std::string> all_op_names;
std::set<std::string> op_names_with_consumers;
std::map<std::string, uint64_t> op_statistics;
@ -325,7 +325,7 @@ InputModel::InputModelTFImpl::InputModelTFImpl(const GraphIterator::Ptr& graph_i
: m_graph_iterator(graph_iterator),
m_input_model(input_model) {
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
loadPlaces();
load_places();
}
std::shared_ptr<InputModel> InputModel::InputModelTFImpl::get_body_input_model(
@ -346,18 +346,18 @@ InputModel::InputModelTFImpl::InputModelTFImpl(const GraphIterator::Ptr& graph_i
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
m_input_names = graph_iterator->get_input_names();
m_output_names = graph_iterator->get_output_names();
loadPlaces();
load_places();
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::getInputs() const {
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::get_inputs() const {
return m_inputs;
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::getOutputs() const {
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::get_outputs() const {
return m_outputs;
}
ov::frontend::Place::Ptr InputModel::InputModelTFImpl::getPlaceByTensorName(const std::string& tensorName) const {
ov::frontend::Place::Ptr InputModel::InputModelTFImpl::get_place_by_tensor_name(const std::string& tensorName) const {
if (m_tensor_places.find(tensorName) != m_tensor_places.end())
return m_tensor_places.at(tensorName);
@ -389,7 +389,7 @@ std::shared_ptr<TensorPlace> castToTensorPlace(const ov::frontend::Place::Ptr& p
    FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlace.");
}
void InputModel::InputModelTFImpl::overrideAllInputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
void InputModel::InputModelTFImpl::override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
m_graph_changed = true;
m_inputs.clear();
for (const auto& input_place : inputs) {
@ -397,7 +397,7 @@ void InputModel::InputModelTFImpl::overrideAllInputs(const std::vector<ov::front
}
}
void InputModel::InputModelTFImpl::overrideAllOutputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
void InputModel::InputModelTFImpl::override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
m_graph_changed = true;
m_outputs.clear();
for (const auto& output_place : outputs) {
@ -405,30 +405,30 @@ void InputModel::InputModelTFImpl::overrideAllOutputs(const std::vector<ov::fron
}
}
void InputModel::InputModelTFImpl::extractSubgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
void InputModel::InputModelTFImpl::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs) {
m_graph_changed = true;
overrideAllInputs(inputs);
overrideAllOutputs(outputs);
override_all_inputs(inputs);
override_all_outputs(outputs);
}
void InputModel::InputModelTFImpl::setPartialShape(ov::frontend::Place::Ptr place, const ov::PartialShape& p_shape) {
void InputModel::InputModelTFImpl::set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape& p_shape) {
castToTensorPlace(place)->set_partial_shape(p_shape);
}
ov::PartialShape InputModel::InputModelTFImpl::getPartialShape(ov::frontend::Place::Ptr place) const {
ov::PartialShape InputModel::InputModelTFImpl::get_partial_shape(ov::frontend::Place::Ptr place) const {
return castToTensorPlace(place)->get_partial_shape();
}
void InputModel::InputModelTFImpl::setElementType(ov::frontend::Place::Ptr place, const ov::element::Type& type) {
void InputModel::InputModelTFImpl::set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type& type) {
castToTensorPlace(place)->set_element_type(type);
}
ov::element::Type InputModel::InputModelTFImpl::getElementType(ov::frontend::Place::Ptr place) const {
ov::element::Type InputModel::InputModelTFImpl::get_element_type(ov::frontend::Place::Ptr place) const {
return castToTensorPlace(place)->get_element_type();
}
void InputModel::InputModelTFImpl::setTensorValue(ov::frontend::Place::Ptr place, const void* value) {
void InputModel::InputModelTFImpl::set_tensor_value(ov::frontend::Place::Ptr place, const void* value) {
m_graph_changed = true;
auto tensor_place = castToTensorPlace(place);
auto p_shape = tensor_place->get_partial_shape();
@ -473,48 +473,48 @@ std::map<std::string, Output<Node>> InputModel::get_tensor_values() const {
}
std::vector<ov::frontend::Place::Ptr> InputModel::get_inputs() const {
return _impl->getInputs();
return _impl->get_inputs();
}
std::vector<ov::frontend::Place::Ptr> InputModel::get_outputs() const {
return _impl->getOutputs();
return _impl->get_outputs();
}
ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const {
return _impl->getPlaceByTensorName(tensorName);
return _impl->get_place_by_tensor_name(tensorName);
}
void InputModel::override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
_impl->overrideAllOutputs(outputs);
_impl->override_all_outputs(outputs);
}
void InputModel::override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
_impl->overrideAllInputs(inputs);
_impl->override_all_inputs(inputs);
}
void InputModel::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs) {
_impl->extractSubgraph(inputs, outputs);
_impl->extract_subgraph(inputs, outputs);
}
void InputModel::set_partial_shape(const ov::frontend::Place::Ptr& place, const ov::PartialShape& p_shape) {
_impl->setPartialShape(place, p_shape);
_impl->set_partial_shape(place, p_shape);
}
ov::PartialShape InputModel::get_partial_shape(const ov::frontend::Place::Ptr& place) const {
return _impl->getPartialShape(place);
return _impl->get_partial_shape(place);
}
void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type& type) {
_impl->setElementType(place, type);
_impl->set_element_type(place, type);
}
ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& place) const {
return _impl->getElementType(place);
return _impl->get_element_type(place);
}
void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) {
_impl->setTensorValue(place, value);
_impl->set_tensor_value(place, value);
}
} // namespace tensorflow
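The net effect of the renames in this file is that the public InputModel API and its pimpl now share identical snake_case names, which is what the NCC naming check enforces. A minimal self-contained sketch of the resulting one-line forwarding pattern (toy types, not the real declarations):

#include <memory>
#include <vector>

struct Place { using Ptr = std::shared_ptr<Place>; };  // stand-in for ov::frontend::Place

class InputModel {
    class Impl {  // stands in for InputModelTFImpl
    public:
        std::vector<Place::Ptr> get_inputs() const { return {}; }
    };
    std::shared_ptr<Impl> _impl = std::make_shared<Impl>();

public:
    std::vector<Place::Ptr> get_inputs() const {
        return _impl->get_inputs();  // same snake_case name on both sides after the rename
    }
};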

View File

@ -6,11 +6,11 @@
#include "openvino/frontend/tensorflow/frontend.hpp"
#include "openvino/frontend/tensorflow/visibility.hpp"
TENSORFLOW_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
TENSORFLOW_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
TENSORFLOW_C_API void* GetFrontEndData() {
TENSORFLOW_C_API void* get_front_end_data() {
auto res = new ov::frontend::FrontEndPluginInfo();
res->m_name = "tf";
res->m_creator = []() {

View File

@ -27,3 +27,10 @@ target_include_directories(${TARGET_NAME}
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE})
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
SOURCE_DIRECTORIES "${root_dir}/include"
"${root_dir}/src"
ADDITIONAL_INCLUDE_DIRECTORIES
$<TARGET_PROPERTY:${TARGET_NAME},INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:${TARGET_NAME},INCLUDE_DIRECTORIES>)

View File

@ -5,6 +5,7 @@
#include "decoder_flatbuffer.h"
#include "schema_generated.h"
#include "utils.hpp"
namespace ov {
namespace frontend {
@ -68,21 +69,21 @@ ov::element::Type DecoderFlatBuffer::get_output_tensor_type(size_t idx) const {
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> DecoderFlatBuffer::decode_input_tensor(
size_t idx,
const InputModel& model) const {
const ov::frontend::InputModel& model) const {
FRONT_END_GENERAL_CHECK(idx < get_input_size(), "Requested input is out-of-range");
return decode_tensor(m_input_info.at(idx), model);
}
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> DecoderFlatBuffer::decode_output_tensor(
size_t idx,
const InputModel& model) const {
const ov::frontend::InputModel& model) const {
FRONT_END_GENERAL_CHECK(idx < get_output_size(), "Requested output is out-of-range");
return decode_tensor(m_output_info.at(idx), model);
}
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> DecoderFlatBuffer::decode_tensor(
const ov::frontend::tensorflow_lite::TensorInfo& tensor_info,
const InputModel& model) const {
const ov::frontend::InputModel& model) const {
const auto tensor = tensor_info.tensor;
std::vector<std::string> names = {tensor->name()->str()};

View File

@ -43,8 +43,6 @@ public:
return (opts->*member)();
}
ov::Any get_attribute(const std::string& name) const override {
return {};
}
@ -67,9 +65,11 @@ public:
const std::string& get_op_type() const override;
const std::string& get_op_name() const override;
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> decode_input_tensor(size_t idx, const InputModel& model) const;
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> decode_input_tensor(size_t idx,
const ov::frontend::InputModel& model) const;
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> decode_output_tensor(size_t idx, const InputModel& model) const;
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> decode_output_tensor(size_t idx,
const ov::frontend::InputModel& model) const;
private:
std::shared_ptr<ov::frontend::tensorflow_lite::TensorLitePlace> decode_tensor(

View File

@ -2,7 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <graph_iterator_flatbuffer.hpp>
#include "graph_iterator_flatbuffer.hpp"
#include <map>
#include "decoder_flatbuffer.h"
using namespace ov::frontend::tensorflow_lite;
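The include fix above also encodes the usual project convention: quoted form for project-local headers, angle brackets for external and standard ones. In miniature:

#include "graph_iterator_flatbuffer.hpp"  // project header, resolved against the source tree first
#include <map>                            // standard library header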

View File

@ -6,7 +6,6 @@
#include <fstream>
#include "decoder_flatbuffer.h"
#include "openvino/frontend/exception.hpp"
#include "openvino/util/file_util.hpp"
#include "schema_generated.h"

View File

@ -26,9 +26,9 @@ public:
InputModelTFLiteImpl(const GraphIteratorFlatBuffer::Ptr& graph_iterator,
const ov::frontend::InputModel& input_model,
const std::shared_ptr<TelemetryExtension>& telemetry);
std::vector<ov::frontend::Place::Ptr> getInputs() const;
std::vector<ov::frontend::Place::Ptr> getOutputs() const;
ov::frontend::Place::Ptr getPlaceByTensorName(const std::string& tensorName) const;
std::vector<ov::frontend::Place::Ptr> get_inputs() const;
std::vector<ov::frontend::Place::Ptr> get_outputs() const;
ov::frontend::Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const;
///// Searching for places /////
std::vector<std::shared_ptr<OpPlace>> get_op_places() const {
@ -42,26 +42,26 @@ public:
}
///// Naming and annotation /////
void setNameForTensor(const Place::Ptr& tensor, const std::string& new_name);
void addNameForTensor(const Place::Ptr& tensor, const std::string& new_name);
void setNameForOperation(const Place::Ptr& operation, const std::string& new_name);
void set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name);
void add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name);
void set_name_for_operation(const Place::Ptr& operation, const std::string& new_name);
///// Setting / getting tensor properties /////
void setPartialShape(ov::frontend::Place::Ptr place, const ov::PartialShape& shape);
ov::PartialShape getPartialShape(ov::frontend::Place::Ptr place) const;
void setElementType(ov::frontend::Place::Ptr place, const ov::element::Type& type);
ov::element::Type getElementType(ov::frontend::Place::Ptr place) const;
void setTensorValue(ov::frontend::Place::Ptr place, const void* value);
void set_partial_shape(ov::frontend::Place::Ptr place, const ov::PartialShape& shape);
ov::PartialShape get_partial_shape(ov::frontend::Place::Ptr place) const;
void set_element_type(ov::frontend::Place::Ptr place, const ov::element::Type& type);
ov::element::Type get_element_type(ov::frontend::Place::Ptr place) const;
void set_tensor_value(ov::frontend::Place::Ptr place, const void* value);
///// Topology Editing /////
void overrideAllOutputs(const std::vector<ov::frontend::Place::Ptr>& outputs);
void overrideAllInputs(const std::vector<ov::frontend::Place::Ptr>& inputs);
void extractSubgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
void override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs);
void override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs);
void extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs);
private:
void loadModel();
void cleanUp();
void load_model();
void clean_up();
std::vector<std::shared_ptr<OpPlace>> m_op_places;
std::map<std::string, std::shared_ptr<OpPlace>> m_op_places_map;
@ -76,7 +76,7 @@ private:
std::shared_ptr<TelemetryExtension> m_telemetry;
};
void InputModel::InputModelTFLiteImpl::loadModel() {
void InputModel::InputModelTFLiteImpl::load_model() {
std::map<std::string, uint64_t> op_statistics; // for telemetry
m_op_places.reserve(m_graph_iterator->size());
@ -169,7 +169,7 @@ InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBu
: m_graph_iterator(graph_iterator),
m_input_model(input_model) {
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
loadModel();
load_model();
}
InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBuffer::Ptr& graph_iterator,
@ -179,14 +179,14 @@ InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBu
m_input_model(input_model),
m_telemetry(telemetry) {
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
loadModel();
load_model();
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFLiteImpl::getInputs() const {
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFLiteImpl::get_inputs() const {
return m_inputs;
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFLiteImpl::getOutputs() const {
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFLiteImpl::get_outputs() const {
return m_outputs;
}
@ -197,7 +197,8 @@ std::shared_ptr<TensorPlace> castToTensorPlace(const ov::frontend::Place::Ptr& p
FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlace.");
}
ov::frontend::Place::Ptr InputModel::InputModelTFLiteImpl::getPlaceByTensorName(const std::string& tensorName) const {
ov::frontend::Place::Ptr InputModel::InputModelTFLiteImpl::get_place_by_tensor_name(
const std::string& tensorName) const {
if (m_tensor_places.find(tensorName) != m_tensor_places.end())
return castToTensorPlace(m_tensor_places.at(tensorName));
else
@ -211,23 +212,23 @@ std::shared_ptr<OpPlace> castToOpPlace(const ov::frontend::Place::Ptr& place) {
    FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to OpPlace.");
}
void InputModel::InputModelTFLiteImpl::setPartialShape(ov::frontend::Place::Ptr place, const PartialShape& shape) {
void InputModel::InputModelTFLiteImpl::set_partial_shape(ov::frontend::Place::Ptr place, const PartialShape& shape) {
castToTensorPlace(place)->set_partial_shape(shape);
}
ov::PartialShape InputModel::InputModelTFLiteImpl::getPartialShape(ov::frontend::Place::Ptr place) const {
ov::PartialShape InputModel::InputModelTFLiteImpl::get_partial_shape(ov::frontend::Place::Ptr place) const {
return castToTensorPlace(place)->get_partial_shape();
}
void InputModel::InputModelTFLiteImpl::setElementType(ov::frontend::Place::Ptr place, const element::Type& type) {
void InputModel::InputModelTFLiteImpl::set_element_type(ov::frontend::Place::Ptr place, const element::Type& type) {
castToTensorPlace(place)->set_element_type(type);
}
ov::element::Type InputModel::InputModelTFLiteImpl::getElementType(ov::frontend::Place::Ptr place) const {
ov::element::Type InputModel::InputModelTFLiteImpl::get_element_type(ov::frontend::Place::Ptr place) const {
return castToTensorPlace(place)->get_element_type();
}
void InputModel::InputModelTFLiteImpl::setTensorValue(ov::frontend::Place::Ptr place, const void* value) {
void InputModel::InputModelTFLiteImpl::set_tensor_value(ov::frontend::Place::Ptr place, const void* value) {
auto tensor_place = castToTensorPlace(place);
auto p_shape = tensor_place->get_partial_shape();
auto type = tensor_place->get_element_type();
@ -243,25 +244,26 @@ void InputModel::InputModelTFLiteImpl::setTensorValue(ov::frontend::Place::Ptr p
m_tensor_values[name] = constant;
}
void InputModel::InputModelTFLiteImpl::setNameForTensor(const Place::Ptr& tensor, const std::string& new_name) {
void InputModel::InputModelTFLiteImpl::set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) {
castToTensorPlace(tensor)->set_names({new_name});
}
void InputModel::InputModelTFLiteImpl::addNameForTensor(const Place::Ptr& tensor, const std::string& new_name) {
void InputModel::InputModelTFLiteImpl::add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) {
auto tf_tensor = castToTensorPlace(tensor);
auto names = tf_tensor->get_names();
names.push_back(new_name);
tf_tensor->set_names(names);
}
void InputModel::InputModelTFLiteImpl::setNameForOperation(const Place::Ptr& operation, const std::string& new_name) {
void InputModel::InputModelTFLiteImpl::set_name_for_operation(const Place::Ptr& operation,
const std::string& new_name) {
auto op = castToOpPlace(operation);
auto names = op->get_names();
names.push_back(new_name);
op->set_names(names);
}
void InputModel::InputModelTFLiteImpl::overrideAllInputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
void InputModel::InputModelTFLiteImpl::override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
for (const auto& input_place : m_inputs) {
auto input_lite_place = std::dynamic_pointer_cast<ov::frontend::tensorflow_lite::TensorLitePlace>(input_place);
FRONT_END_GENERAL_CHECK(input_lite_place != nullptr, "Input Model has unexpected place as input");
@ -271,10 +273,10 @@ void InputModel::InputModelTFLiteImpl::overrideAllInputs(const std::vector<ov::f
for (const auto& input_place : inputs) {
m_inputs.push_back(castToTensorPlace(input_place));
}
cleanUp();
clean_up();
}
void InputModel::InputModelTFLiteImpl::overrideAllOutputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
void InputModel::InputModelTFLiteImpl::override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
for (const auto& output_place : m_outputs) {
auto output_lite_place =
std::dynamic_pointer_cast<ov::frontend::tensorflow_lite::TensorLitePlace>(output_place);
@ -285,10 +287,10 @@ void InputModel::InputModelTFLiteImpl::overrideAllOutputs(const std::vector<ov::
for (const auto& output_place : outputs) {
m_outputs.push_back(castToTensorPlace(output_place));
}
cleanUp();
clean_up();
}
void InputModel::InputModelTFLiteImpl::extractSubgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
void InputModel::InputModelTFLiteImpl::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs) {
for (const auto& input_place : m_inputs) {
auto input_lite_place = std::dynamic_pointer_cast<ov::frontend::tensorflow_lite::TensorLitePlace>(input_place);
@ -309,10 +311,10 @@ void InputModel::InputModelTFLiteImpl::extractSubgraph(const std::vector<ov::fro
for (const auto& output_place : outputs) {
m_outputs.push_back(castToTensorPlace(output_place));
}
cleanUp();
clean_up();
}
void InputModel::InputModelTFLiteImpl::cleanUp() {
void InputModel::InputModelTFLiteImpl::clean_up() {
// TODO: remove all unnecessary tensors and operations; this can be postponed, as TF Lite is an out-of-the-box kind of FrontEnd
}
@ -334,60 +336,60 @@ std::map<std::string, Output<Node>> InputModel::get_tensor_values() const {
}
std::vector<ov::frontend::Place::Ptr> InputModel::get_inputs() const {
return _impl->getInputs();
return _impl->get_inputs();
}
std::vector<ov::frontend::Place::Ptr> InputModel::get_outputs() const {
return _impl->getOutputs();
return _impl->get_outputs();
}
ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const {
return _impl->getPlaceByTensorName(tensorName);
return _impl->get_place_by_tensor_name(tensorName);
}
void InputModel::set_partial_shape(const Place::Ptr& place, const PartialShape& shape) {
_impl->setPartialShape(place, shape);
_impl->set_partial_shape(place, shape);
}
ov::PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const {
return _impl->getPartialShape(place);
return _impl->get_partial_shape(place);
}
void InputModel::set_element_type(const Place::Ptr& place, const element::Type& type) {
_impl->setElementType(place, type);
_impl->set_element_type(place, type);
}
ov::element::Type InputModel::get_element_type(const Place::Ptr& place) const {
return _impl->getElementType(place);
return _impl->get_element_type(place);
}
void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) {
_impl->setTensorValue(place, value);
_impl->set_tensor_value(place, value);
}
void InputModel::set_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) {
_impl->setNameForTensor(tensor, new_name);
_impl->set_name_for_tensor(tensor, new_name);
}
void InputModel::add_name_for_tensor(const Place::Ptr& tensor, const std::string& new_name) {
_impl->addNameForTensor(tensor, new_name);
_impl->add_name_for_tensor(tensor, new_name);
}
void InputModel::set_name_for_operation(const Place::Ptr& operation, const std::string& new_name) {
_impl->setNameForOperation(operation, new_name);
_impl->set_name_for_operation(operation, new_name);
}
void InputModel::override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
_impl->overrideAllOutputs(outputs);
_impl->override_all_outputs(outputs);
}
void InputModel::override_all_inputs(const std::vector<ov::frontend::Place::Ptr>& inputs) {
_impl->overrideAllInputs(inputs);
_impl->override_all_inputs(inputs);
}
void InputModel::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
const std::vector<ov::frontend::Place::Ptr>& outputs) {
_impl->extractSubgraph(inputs, outputs);
_impl->extract_subgraph(inputs, outputs);
}
} // namespace tensorflow_lite

View File

@ -3,9 +3,10 @@
//
#pragma once
#include "graph_iterator_flatbuffer.hpp"
#include "input_model.hpp"
#include "openvino/frontend/extension/telemetry.hpp"
#include "openvino/frontend/input_model.hpp"
#include "openvino/frontend/tensorflow_lite/frontend.hpp"
#include "openvino/opsets/opset1.hpp"
#include "tensor_lite_place.hpp"

View File

@ -1,9 +1,11 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "tensor_lite_place.hpp"
#include "quantization_info.hpp"
#include "utils.hpp"
void ov::frontend::tensorflow_lite::TensorLitePlace::translate(ov::Output<ov::Node>& output,
bool convert_tensor_attrs_to_nodes) {

View File

@ -10,7 +10,6 @@
#include "openvino/frontend/tensorflow_lite/visibility.hpp"
#include "place.hpp"
#include "quantization_info.hpp"
#include "utils.hpp"
namespace ov {
namespace frontend {

View File

@ -6,11 +6,11 @@
#include "openvino/frontend/tensorflow_lite/frontend.hpp"
#include "openvino/frontend/tensorflow_lite/visibility.hpp"
TENSORFLOW_LITE_C_API ov::frontend::FrontEndVersion GetAPIVersion() {
TENSORFLOW_LITE_C_API ov::frontend::FrontEndVersion get_api_version() {
return OV_FRONTEND_API_VERSION;
}
TENSORFLOW_LITE_C_API void* GetFrontEndData() {
TENSORFLOW_LITE_C_API void* get_front_end_data() {
auto res = new ov::frontend::FrontEndPluginInfo();
res->m_name = "tflite";
res->m_creator = []() {

View File

@ -14,14 +14,14 @@ using namespace ov;
std::shared_ptr<ov::frontend::tensorflow_lite::QuantizationInfo> ov::frontend::tensorflow_lite::get_quantization(
const tflite::QuantizationParameters* tf_quantization) {
if (tf_quantization == NULL)
if (tf_quantization == nullptr)
return {};
auto quantization = std::make_shared<ov::frontend::tensorflow_lite::QuantizationInfo>();
auto tf_zp = tf_quantization->zero_point();
auto tf_scale = tf_quantization->scale();
if (tf_zp != NULL)
if (tf_zp != nullptr)
quantization->set_zero_point({(*tf_zp).begin(), (*tf_zp).end()});
if (tf_scale != NULL)
if (tf_scale != nullptr)
quantization->set_scale({(*tf_scale).begin(), (*tf_scale).end()});
if (quantization->get_zero_point().empty() && quantization->get_scale().empty())
return {};
@ -29,8 +29,7 @@ std::shared_ptr<ov::frontend::tensorflow_lite::QuantizationInfo> ov::frontend::t
return quantization;
}
namespace {
const std::map<tflite::TensorType, ov::element::Type>& TYPE_MAP() {
ov::element::Type ov::frontend::tensorflow_lite::get_ov_type(const tflite::TensorType& tf_type) {
static const std::map<tflite::TensorType, ov::element::Type> type_map{
{tflite::TensorType_FLOAT32, element::f32},
{tflite::TensorType_FLOAT16, element::f16},
@ -52,16 +51,9 @@ const std::map<tflite::TensorType, ov::element::Type>& TYPE_MAP() {
// {TensorType_RESOURCE, element::resource},
// {TensorType_VARIANT, element::variant},
};
return type_map;
}
} // namespace
ov::element::Type ov::frontend::tensorflow_lite::get_ov_type(const tflite::TensorType& tf_type) {
const auto& mapping = TYPE_MAP();
FRONT_END_GENERAL_CHECK(mapping.find(tf_type) != mapping.end(),
"Unexpected type: ",
tflite::EnumNameTensorType(tf_type));
return mapping.at(tf_type);
auto it = type_map.find(tf_type);
FRONT_END_GENERAL_CHECK(it != type_map.end(), "Unexpected type: ", tflite::EnumNameTensorType(tf_type));
return it->second;
}
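Note the contrast with the TensorFlow frontend's get_ov_type earlier in this diff: the TFLite variant keeps its FRONT_END_GENERAL_CHECK and fails fast on an unmapped tflite::TensorType rather than falling back to ov::element::dynamic. A hedged usage sketch (the include is assumed to be this file's own utils.hpp, which presumably declares the function):

#include "utils.hpp"  // assumed to declare ov::frontend::tensorflow_lite::get_ov_type

void tflite_type_example() {
    // A mapped type resolves normally:
    auto f32 = ov::frontend::tensorflow_lite::get_ov_type(tflite::TensorType_FLOAT32);  // element::f32
    (void)f32;
    // An unmapped one (e.g. TensorType_RESOURCE, still commented out in the
    // map above) would throw via FRONT_END_GENERAL_CHECK with the enum name.
}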
ov::PartialShape ov::frontend::tensorflow_lite::get_ov_shape(const flatbuffers::Vector<int32_t>* tf_shape,

View File

@ -3,6 +3,7 @@
//
#pragma once
#include "decoder_flatbuffer.h"
#include "place.hpp"
#include "quantization_info.hpp"

View File

@ -117,7 +117,7 @@ file(GLOB_RECURSE plugin_api_src "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/*.hpp"
add_clang_format_target(${TARGET_NAME}_plugin_api_clang FOR_SOURCES ${plugin_api_src})
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_plugin_api
SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/openvino"
SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/dev_api/openvino"
ADDITIONAL_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:ngraph,INTERFACE_INCLUDE_DIRECTORIES>)
# Create object library
@ -177,7 +177,7 @@ target_include_directories(${TARGET_NAME} INTERFACE $<BUILD_INTERFACE:${PUBLIC_H
add_clang_format_target(${TARGET_NAME}_clang FOR_SOURCES ${IE_STATIC_DEPENDENT_FILES} ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${PUBLIC_HEADERS})
ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_obj
SOURCE_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/openvino"
SOURCE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include/openvino"
ADDITIONAL_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:ngraph,INTERFACE_INCLUDE_DIRECTORIES>)
# Static library used for unit tests which are always built

View File

@ -36,7 +36,13 @@
* @brief XML helpers function to extract values from `pugi::xml_node`
* @ingroup ie_dev_api_xml
*/
namespace XMLParseUtils {
namespace pugixml {
/**
* @brief XML helpers function to extract values from `pugi::xml_node`
* @ingroup ie_dev_api_xml
*/
namespace utils {
/**
* @brief Gets the integer attribute from `pugi::xml_node`
@ -217,7 +223,8 @@ GetPrecisionAttr(const pugi::xml_node& node, const char* str, InferenceEngine::P
*/
INFERENCE_ENGINE_API_CPP(int) GetIntChild(const pugi::xml_node& node, const char* str, int defVal);
} // namespace XMLParseUtils
} // namespace utils
} // namespace pugixml
/**
* @brief A XML parse result structure with an error message and the `pugi::xml_document` document.

View File

@ -209,8 +209,8 @@ std::istream& operator>>(std::istream& stream, CompiledBlobHeader& header) {
}
pugi::xml_node compiledBlobNode = document.document_element();
header.m_ieVersion = XMLParseUtils::GetStrAttr(compiledBlobNode, "ie_version");
header.m_fileInfo = XMLParseUtils::GetStrAttr(compiledBlobNode, "file_info");
header.m_ieVersion = pugixml::utils::GetStrAttr(compiledBlobNode, "ie_version");
header.m_fileInfo = pugixml::utils::GetStrAttr(compiledBlobNode, "file_info");
return stream;
}
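For call sites the XMLParseUtils -> pugixml::utils move is mechanical; the hunks in this diff use the two usual idioms, fully qualified calls (as in the compiled-blob header just above) or a single using-directive per file (as in core.cpp and the v7 readers below). A small sketch, with the helper header name assumed:

#include <pugixml.hpp>
#include "xml_parse_utils.h"  // assumed header name for these helpers

void read_header_attrs(const pugi::xml_node& node) {
    auto ie_version = pugixml::utils::GetStrAttr(node, "ie_version");  // fully qualified
    using namespace pugixml::utils;
    auto name = GetStrAttr(node, "name");  // via using-directive
    (void)ie_version;
    (void)name;
}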

View File

@ -74,7 +74,7 @@ void ov::CoreImpl::register_plugins_in_registry(const std::string& xml_config_fi
pugi::xml_document& xmlDoc = *parse_result.xml;
using namespace XMLParseUtils;
using namespace pugixml::utils;
pugi::xml_node ieNode = xmlDoc.document_element();
pugi::xml_node devicesNode = ieNode.child("plugins");

View File

@ -159,7 +159,7 @@ void registerReaders() {
}
void assertIfIRv7LikeModel(std::istream& modelStream) {
auto irVersion = details::GetIRVersion(modelStream);
auto irVersion = details::get_ir_version(modelStream);
bool isIRv7 = irVersion > 1 && irVersion <= 7;
if (!isIRv7 || reader_irv7)

View File

@ -12,7 +12,7 @@
#include "ie_precision.hpp"
int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str) {
int pugixml::utils::GetIntAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -27,7 +27,7 @@ int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str) {
return int_value;
}
int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str) {
int64_t pugixml::utils::GetInt64Attr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -42,7 +42,7 @@ int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str)
return static_cast<int64_t>(int_value);
}
uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* str) {
uint64_t pugixml::utils::GetUInt64Attr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -57,7 +57,7 @@ uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* st
return static_cast<uint64_t>(int_value);
}
unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char* str) {
unsigned int pugixml::utils::GetUIntAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -72,7 +72,7 @@ unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char*
return static_cast<unsigned int>(int_value);
}
std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* str) {
std::string pugixml::utils::GetStrAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: '" << str << "' at offset "
@ -80,14 +80,14 @@ std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* st
return attr.value();
}
std::string XMLParseUtils::GetStrAttr(const pugi::xml_node& node, const char* str, const char* def) {
std::string pugixml::utils::GetStrAttr(const pugi::xml_node& node, const char* str, const char* def) {
auto attr = node.attribute(str);
if (attr.empty())
return def;
return attr.value();
}
bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str, const bool def) {
bool pugixml::utils::GetBoolAttr(const pugi::xml_node& node, const char* str, const bool def) {
auto attr = node.attribute(str);
if (attr.empty())
return def;
@ -108,7 +108,7 @@ bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str, con
return is_true;
}
bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str) {
bool pugixml::utils::GetBoolAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -130,7 +130,7 @@ bool XMLParseUtils::GetBoolAttr(const pugi::xml_node& node, const char* str) {
return is_true;
}
float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str) {
float pugixml::utils::GetFloatAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -147,7 +147,7 @@ float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str) {
return float_value;
}
InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node& node, const char* str) {
InferenceEngine::Precision pugixml::utils::GetPrecisionAttr(const pugi::xml_node& node, const char* str) {
auto attr = node.attribute(str);
if (attr.empty())
IE_THROW() << "node <" << node.name() << "> is missing mandatory attribute: " << str << " at offset "
@ -155,7 +155,7 @@ InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node&
return InferenceEngine::Precision::FromStr(attr.value());
}
InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node& node,
InferenceEngine::Precision pugixml::utils::GetPrecisionAttr(const pugi::xml_node& node,
const char* str,
InferenceEngine::Precision def) {
auto attr = node.attribute(str);
@ -164,42 +164,42 @@ InferenceEngine::Precision XMLParseUtils::GetPrecisionAttr(const pugi::xml_node&
return InferenceEngine::Precision::FromStr(attr.value());
}
int XMLParseUtils::GetIntAttr(const pugi::xml_node& node, const char* str, int defVal) {
int pugixml::utils::GetIntAttr(const pugi::xml_node& node, const char* str, int defVal) {
auto attr = node.attribute(str);
if (attr.empty())
return defVal;
return GetIntAttr(node, str);
}
int64_t XMLParseUtils::GetInt64Attr(const pugi::xml_node& node, const char* str, int64_t defVal) {
int64_t pugixml::utils::GetInt64Attr(const pugi::xml_node& node, const char* str, int64_t defVal) {
auto attr = node.attribute(str);
if (attr.empty())
return defVal;
return GetInt64Attr(node, str);
}
uint64_t XMLParseUtils::GetUInt64Attr(const pugi::xml_node& node, const char* str, uint64_t defVal) {
uint64_t pugixml::utils::GetUInt64Attr(const pugi::xml_node& node, const char* str, uint64_t defVal) {
auto attr = node.attribute(str);
if (attr.empty())
return defVal;
return GetUInt64Attr(node, str);
}
unsigned int XMLParseUtils::GetUIntAttr(const pugi::xml_node& node, const char* str, unsigned int defVal) {
unsigned int pugixml::utils::GetUIntAttr(const pugi::xml_node& node, const char* str, unsigned int defVal) {
auto attr = node.attribute(str);
if (attr.empty())
return defVal;
return GetUIntAttr(node, str);
}
float XMLParseUtils::GetFloatAttr(const pugi::xml_node& node, const char* str, float defVal) {
float pugixml::utils::GetFloatAttr(const pugi::xml_node& node, const char* str, float defVal) {
auto attr = node.attribute(str);
if (attr.empty())
return defVal;
return GetFloatAttr(node, str);
}
int XMLParseUtils::GetIntChild(const pugi::xml_node& node, const char* str, int defVal) {
int pugixml::utils::GetIntChild(const pugi::xml_node& node, const char* str, int defVal) {
auto child = node.child(str);
if (child.empty())
return defVal;

View File

@ -467,7 +467,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
IE_THROW(NetworkNotRead) << "Error reading HETERO device xml header";
}
using namespace XMLParseUtils;
using namespace pugixml::utils;
pugi::xml_node heteroNode = heteroXmlDoc.document_element();
_name = GetStrAttr(heteroNode, "name");

View File

@ -10,8 +10,8 @@
namespace InferenceEngine {
namespace details {
inline size_t GetIRVersion(pugi::xml_node& root) {
return XMLParseUtils::GetUIntAttr(root, "version", 0);
inline size_t get_ir_version(pugi::xml_node& root) {
return pugixml::utils::GetUIntAttr(root, "version", 0);
}
/**
@ -19,7 +19,7 @@ inline size_t GetIRVersion(pugi::xml_node& root) {
 * @param model Model stream
 * @return IR version, 0 if the model does not represent IR
*/
inline size_t GetIRVersion(std::istream& model) {
inline size_t get_ir_version(std::istream& model) {
std::array<char, 512> header = {};
model.seekg(0, model.beg);
@ -38,7 +38,7 @@ inline size_t GetIRVersion(std::istream& model) {
std::transform(node_name.begin(), node_name.end(), node_name.begin(), ::tolower);
if (node_name == "net") {
return GetIRVersion(root);
return get_ir_version(root);
}
}

View File

@ -47,7 +47,7 @@ StatusCode CNNNetReaderImpl::SetWeights(const TBlob<uint8_t>::Ptr& weights, Resp
}
static size_t GetFileVersion(pugi::xml_node& root) {
return XMLParseUtils::GetUIntAttr(root, "version", 0);
return pugixml::utils::GetUIntAttr(root, "version", 0);
}
StatusCode CNNNetReaderImpl::ReadNetwork(const void* model, size_t size, ResponseDesc* resp) noexcept {

View File

@ -16,7 +16,7 @@
using namespace InferenceEngine;
using namespace InferenceEngine::details;
using namespace XMLParseUtils;
using namespace pugixml::utils;
using namespace std;
void LayerParseParameters::addOutputPort(const LayerPortData& port) {
@ -72,7 +72,7 @@ void FormatParser::ParseGenericParams(pugi::xml_node& node, LayerParseParameters
layerParsePrms.underIRVersion = _version;
InferenceEngine::LayerParams& prms = layerParsePrms.prms;
prms.type = XMLParseUtils::GetStrAttr(node, "type");
prms.type = pugixml::utils::GetStrAttr(node, "type");
prms.precision = _defPrecision;
prms.name = GetStrAttr(node, "name");

View File

@ -20,7 +20,7 @@ using namespace InferenceEngine;
bool IRReader::supportModel(std::istream& model) const {
OV_ITT_SCOPED_TASK(itt::domains::V7Reader, "IRReader::supportModel");
auto version = details::GetIRVersion(model);
auto version = details::get_ir_version(model);
return version > 1 && version <= 7;
}
@ -39,7 +39,7 @@ CNNNetwork IRReader::read(std::istream& model,
}
pugi::xml_node root = xmlDoc.document_element();
auto version = details::GetIRVersion(root);
auto version = details::get_ir_version(root);
IRParser parser(version, exts);
return CNNNetwork(parser.parse(root, weights));
}

View File

@ -26,7 +26,7 @@ inline pugi::xml_node GetChild(const pugi::xml_node& node, std::vector<std::stri
return pugi::xml_node();
}
using namespace XMLParseUtils;
using namespace pugixml::utils;
namespace InferenceEngine {
namespace details {

View File

@ -18,7 +18,7 @@
#endif
using namespace InferenceEngine;
using namespace XMLParseUtils;
using namespace pugixml::utils;
#define CheckAndReturnError(cond, errorMsg) \
if (cond) { std::stringstream ss; ss << errorMsg; m_ErrorMessage = ss.str(); return; }

View File

@ -21,12 +21,12 @@ int MockSetup::m_max_output_port_index = 0;
PartialShape InputModelMockPy::m_returnShape = {};
extern "C" MOCK_API FrontEndVersion GetAPIVersion()
extern "C" MOCK_API FrontEndVersion get_api_version()
{
return OV_FRONTEND_API_VERSION;
}
extern "C" MOCK_API void* GetFrontEndData()
extern "C" MOCK_API void* get_front_end_data()
{
FrontEndPluginInfo* res = new FrontEndPluginInfo();
res->m_name = "openvino_mock_mo_frontend";