Moved all tensorflow proto files to a folder with ov prefix to avoid ODR conflicts (#20636)

Ilya Lavrenov 2023-10-22 01:02:32 +04:00 committed by GitHub
parent b17d0fe7f5
commit 0934d2a7dd
62 changed files with 297 additions and 289 deletions

View File

@ -125,17 +125,24 @@ macro(ov_add_frontend)
source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS})
# Generate protobuf file on build time for each '.proto' file in src/proto
file(GLOB proto_files ${frontend_root_dir}/src/proto/*.proto)
set(protofiles_root_dir "${frontend_root_dir}/src/proto")
file(GLOB_RECURSE proto_files ${protofiles_root_dir}/*.proto)
foreach(proto_file IN LISTS proto_files)
# filter out standard google proto files
if(proto_file MATCHES ".*google.*")
continue()
endif()
file(RELATIVE_PATH proto_file_relative "${CMAKE_SOURCE_DIR}" "${proto_file}")
get_filename_component(FILE_DIR ${proto_file} DIRECTORY)
get_filename_component(FILE_WE ${proto_file} NAME_WE)
set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.cc)
set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.h)
file(RELATIVE_PATH relative_path ${protofiles_root_dir} ${proto_file})
get_filename_component(relative_path ${relative_path} DIRECTORY)
set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${relative_path}/${FILE_WE}.pb.cc)
set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${relative_path}/${FILE_WE}.pb.h)
add_custom_command(
OUTPUT "${OUTPUT_PB_SRC}" "${OUTPUT_PB_HEADER}"
COMMAND ${PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${FILE_DIR} ${FILE_WE}.proto
COMMAND ${PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${protofiles_root_dir} ${proto_file}
DEPENDS ${PROTOC_DEPENDENCY} ${proto_file}
COMMENT "Running C++ protocol buffer compiler (${PROTOC_EXECUTABLE}) on ${proto_file_relative}"
VERBATIM
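
The CMake change above is the mechanism behind the commit title: protoc is invoked with -I pointing at src/proto, and each proto file's subdirectory (for example ov_tensorflow/) is mirrored into the build tree, so the generated headers gain that folder prefix while the proto packages revert to their upstream names (tensorflow, paddle, xla). Below is a minimal C++ sketch of the resulting include/namespace combination; the file and type names are taken from the hunks that follow, but the helper function itself is hypothetical.

// Sketch only: generated headers are now found under the "ov_tensorflow/"
// include prefix, while symbols keep their upstream ::tensorflow names
// because the proto package is unchanged.
#include "ov_tensorflow/graph.pb.h"  // previously: #include "graph.pb.h"

#include <memory>

std::shared_ptr<::tensorflow::GraphDef> make_empty_graph_def() {
    return std::make_shared<::tensorflow::GraphDef>();
}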

View File

@ -19,9 +19,9 @@ namespace ov {
namespace frontend {
namespace paddle {
using namespace ::ov_paddle::framework;
using namespace ::paddle::framework;
ov::element::Type get_ov_type(const ::ov_paddle::framework::proto::VarType_Type& type) {
ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type) {
static const std::map<proto::VarType_Type, ov::element::Type> type_map{
{proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean},
{proto::VarType_Type::VarType_Type_INT16, ov::element::i16},
@ -189,7 +189,7 @@ std::vector<proto::OpDesc_Attr> DecoderProto::decode_attribute_helper(const std:
namespace {
inline std::map<std::string, OutputVector> map_for_each_input_impl(
const google::protobuf::RepeatedPtrField<::ov_paddle::framework::proto::OpDesc_Var>& c,
const google::protobuf::RepeatedPtrField<::paddle::framework::proto::OpDesc_Var>& c,
const std::function<Output<Node>(const std::string&, size_t)>& func) {
size_t idx = 0;
std::map<std::string, OutputVector> res;

View File

@ -23,7 +23,7 @@ namespace ov {
namespace frontend {
namespace paddle {
ov::element::Type get_ov_type(const ::ov_paddle::framework::proto::VarType_Type& type);
ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type);
class DecoderProto : public paddle::DecoderBase {
public:
@ -56,7 +56,7 @@ public:
const std::function<Output<Node>(const std::string&, size_t)>& func) const;
private:
std::vector<::ov_paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const;
std::vector<::paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const;
std::weak_ptr<OpPlace> op_place;
const std::shared_ptr<OpPlace> get_place() const {

View File

@ -393,7 +393,7 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
else if (variants[0].is<std::istream*>()) {
// Validating first stream, it must contain a model
auto p_model_stream = variants[0].as<std::istream*>();
::ov_paddle::framework::proto::ProgramDesc fw;
::paddle::framework::proto::ProgramDesc fw;
return fw.ParseFromIstream(p_model_stream);
}
return false;

View File

@ -21,7 +21,7 @@ namespace ov {
namespace frontend {
namespace paddle {
using namespace ::ov_paddle::framework::proto;
using namespace ::paddle::framework::proto;
class InputModel::InputModelImpl {
public:
@ -279,7 +279,7 @@ void InputModel::InputModelImpl::load_consts(const std::basic_string<T>& folder_
if (!var_desc.persistable())
continue;
FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR);
FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::paddle::framework::proto::VarType::LOD_TENSOR);
const auto& tensor = var_desc.type().lod_tensor().tensor();
Shape shape(tensor.dims().cbegin(), tensor.dims().cend());
const auto& type = get_ov_type(tensor.data_type());
@ -324,7 +324,7 @@ void InputModel::InputModelImpl::load_consts(std::istream* weight_stream) {
if (!var_desc.persistable())
continue;
FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR);
FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::paddle::framework::proto::VarType::LOD_TENSOR);
FRONT_END_GENERAL_CHECK(weight_stream != nullptr && weight_stream->peek() != EOF,
"PaddlePaddle *.pdiparams format weight file doesn't exist!");
/*
@ -350,8 +350,8 @@ void InputModel::InputModelImpl::load_consts(std::istream* weight_stream) {
std::unique_ptr<char[]> buf(new char[size]);
weight_stream->read(reinterpret_cast<char*>(buf.get()), size);
std::unique_ptr<::ov_paddle::framework::proto::VarType_TensorDesc> tensor_desc(
new ::ov_paddle::framework::proto::VarType_TensorDesc());
std::unique_ptr<::paddle::framework::proto::VarType_TensorDesc> tensor_desc(
new ::paddle::framework::proto::VarType_TensorDesc());
tensor_desc->ParseFromArray(buf.get(), size);
Shape shape(tensor_desc->dims().cbegin(), tensor_desc->dims().cend());
const auto& type = get_ov_type(tensor_desc->data_type());

View File

@ -29,12 +29,12 @@ bool Place::is_output() const {
}
OpPlace::OpPlace(const ov::frontend::InputModel& input_model,
const ::ov_paddle::framework::proto::OpDesc& op_desc,
const ::paddle::framework::proto::OpDesc& op_desc,
const std::vector<std::string>& names)
: Place(input_model, names),
m_op_desc(op_desc) {}
OpPlace::OpPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::OpDesc& op_desc)
OpPlace::OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc)
: OpPlace(input_model, op_desc, {}) {}
const std::map<std::string, std::vector<std::shared_ptr<OutPortPlace>>>& OpPlace::get_output_ports() const {
@ -58,7 +58,7 @@ std::shared_ptr<InPortPlace> OpPlace::get_input_port_paddle(const std::string& i
return m_input_ports.at(inputName)[inputPortIndex];
}
const ::ov_paddle::framework::proto::OpDesc& OpPlace::get_desc() const {
const ::paddle::framework::proto::OpDesc& OpPlace::get_desc() const {
return m_op_desc;
}
@ -207,11 +207,11 @@ Place::Ptr OpPlace::get_target_tensor(int outputPortIndex) const {
TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model,
const std::vector<std::string>& names,
const ::ov_paddle::framework::proto::VarDesc& var_desc)
const ::paddle::framework::proto::VarDesc& var_desc)
: Place(input_model, names),
m_var_desc(var_desc) {
const auto& var_type = var_desc.type();
if (var_type.type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR) {
if (var_type.type() == ::paddle::framework::proto::VarType::LOD_TENSOR) {
const auto& tensor_desc = var_type.lod_tensor().tensor();
m_type = get_ov_type(tensor_desc.data_type());
m_pshape = PartialShape(std::vector<Dimension>(tensor_desc.dims().begin(), tensor_desc.dims().end()));
@ -219,7 +219,7 @@ TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model,
}
TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model,
const ::ov_paddle::framework::proto::VarDesc& var_desc)
const ::paddle::framework::proto::VarDesc& var_desc)
: TensorPlace(input_model, {var_desc.name()}, var_desc) {}
std::vector<Place::Ptr> TensorPlace::get_consuming_ports() const {
@ -250,7 +250,7 @@ void TensorPlace::add_consuming_port(const std::shared_ptr<InPortPlace>& in_port
m_consuming_ports.push_back(in_port);
}
const ::ov_paddle::framework::proto::VarDesc& TensorPlace::get_desc() const {
const ::paddle::framework::proto::VarDesc& TensorPlace::get_desc() const {
return m_var_desc;
}

View File

@ -7,7 +7,7 @@
#include "input_model.hpp"
#include "openvino/frontend/manager.hpp"
namespace ov_paddle {
namespace paddle {
namespace framework {
namespace proto {
class OpDesc;
@ -15,7 +15,7 @@ class VarDesc;
} // namespace proto
} // namespace framework
} // namespace ov_paddle
} // namespace paddle
namespace ov {
namespace frontend {
@ -101,10 +101,10 @@ private:
class OpPlace : public Place {
public:
OpPlace(const ov::frontend::InputModel& input_model,
const ::ov_paddle::framework::proto::OpDesc& op_desc,
const ::paddle::framework::proto::OpDesc& op_desc,
const std::vector<std::string>& names);
OpPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::OpDesc& op_desc);
OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc);
void add_in_port(const std::shared_ptr<InPortPlace>& input, const std::string& name);
void add_out_port(const std::shared_ptr<OutPortPlace>& output, const std::string& name);
@ -114,7 +114,7 @@ public:
const std::map<std::string, std::vector<std::shared_ptr<InPortPlace>>>& get_input_ports() const;
std::shared_ptr<OutPortPlace> get_output_port_paddle(const std::string& outputName, int outputPortIndex) const;
std::shared_ptr<InPortPlace> get_input_port_paddle(const std::string& inputName, int inputPortIndex) const;
const ::ov_paddle::framework::proto::OpDesc& get_desc() const;
const ::paddle::framework::proto::OpDesc& get_desc() const;
const std::shared_ptr<DecoderBase> get_decoder() const;
void set_decoder(const std::shared_ptr<DecoderBase> op_decoder);
@ -152,7 +152,7 @@ public:
Ptr get_target_tensor(const std::string& outputName, int outputPortIndex) const override;
private:
const ::ov_paddle::framework::proto::OpDesc& m_op_desc; // TODO: to conceal it behind decoder.
const ::paddle::framework::proto::OpDesc& m_op_desc; // TODO: to conceal it behind decoder.
std::shared_ptr<DecoderBase> m_op_decoder;
std::map<std::string, std::vector<std::shared_ptr<InPortPlace>>> m_input_ports;
std::map<std::string, std::vector<std::shared_ptr<OutPortPlace>>> m_output_ports;
@ -162,9 +162,9 @@ class TensorPlace : public Place {
public:
TensorPlace(const ov::frontend::InputModel& input_model,
const std::vector<std::string>& names,
const ::ov_paddle::framework::proto::VarDesc& var_desc);
const ::paddle::framework::proto::VarDesc& var_desc);
TensorPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::VarDesc& var_desc);
TensorPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::VarDesc& var_desc);
void add_producing_port(const std::shared_ptr<OutPortPlace>& out_port);
void add_consuming_port(const std::shared_ptr<InPortPlace>& in_port);
@ -182,7 +182,7 @@ public:
void set_element_type(const element::Type& type) {
m_type = type;
}
const ::ov_paddle::framework::proto::VarDesc& get_desc() const;
const ::paddle::framework::proto::VarDesc& get_desc() const;
// External usage
Ptr get_producing_operation() const override;
@ -192,7 +192,7 @@ public:
bool is_equal_data(const Ptr& another) const override;
private:
const ::ov_paddle::framework::proto::VarDesc& m_var_desc;
const ::paddle::framework::proto::VarDesc& m_var_desc;
PartialShape m_pshape;
element::Type m_type;

View File

@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
package ov_paddle.framework.proto;
package paddle.framework.proto;
option optimize_for = LITE_RUNTIME; // Added by Intel Corporation 2021-2022

View File

@ -7,7 +7,7 @@
#include "checkpoint_utils.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/util/file_util.hpp"
#include "saved_tensor_slice.pb.h"
#include "ov_tensorflow/saved_tensor_slice.pb.h"
#include "tf_utils.hpp"
#ifdef ENABLE_SNAPPY_COMPRESSION
@ -68,7 +68,7 @@ void CheckpointV1Reader::initialize() {
// parse empty index block
// This is only present at the first item of each checkpoint file and serves
// as a table of contents, listing all the tensor slices saved in this file.
::ov_tensorflow::SavedTensorSlices sts;
::tensorflow::SavedTensorSlices sts;
FRONT_END_GENERAL_CHECK(sts.ParseFromArray(value.data(), static_cast<int>(value.size())),
"[TensorFlow Frontend] incorrect input checkpoint file or internal error: cannot parse "
"SavedTensorSlices entry");
@ -254,7 +254,7 @@ void CheckpointV1Reader::read_variable(const std::string& variable_name, ov::Any
// This is only present at the first item of each checkpoint file and serves
// as a table of contents, listing all the tensor slices saved in this file.
::ov_tensorflow::SavedTensorSlices sts;
::tensorflow::SavedTensorSlices sts;
FRONT_END_GENERAL_CHECK(sts.ParseFromArray(raw_data.data(), static_cast<int>(raw_data.size())),
"[TensorFlow Frontend] incorrect input checkpoint file or internal error: cannot parse "
"SavedTensorSlices entry");

View File

@ -12,17 +12,17 @@
#include "checkpoint_utils.hpp"
#include "openvino/core/any.hpp"
#include "openvino/frontend/exception.hpp"
#include "saved_tensor_slice.pb.h"
#include "tensor_shape.pb.h"
#include "types.pb.h"
#include "ov_tensorflow/saved_tensor_slice.pb.h"
#include "ov_tensorflow/tensor_shape.pb.h"
#include "ov_tensorflow/types.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
// stores information about shape, type, and shard id for Variable
struct VariableInfo {
::ov_tensorflow::TensorShapeProto variable_shape;
::ov_tensorflow::DataType variable_type;
::tensorflow::TensorShapeProto variable_shape;
::tensorflow::DataType variable_type;
int32_t shard_id;
size_t offset;
size_t size;

View File

@ -5,11 +5,11 @@
#include "decoder_argdef.hpp"
#include "decoder_proto.hpp"
#include "op_def.pb.h"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "openvino/frontend/tensorflow/special_types.hpp"
#include "ov_tensorflow/op_def.pb.h"
#include "ov_tensorflow/types.pb.h"
#include "tf_utils.hpp"
#include "types.pb.h"
namespace ov {
namespace frontend {

View File

@ -9,11 +9,11 @@
#include "openvino/frontend/tensorflow/decoder.hpp"
namespace ov_tensorflow {
namespace tensorflow {
class GraphDef;
class FunctionDef;
class OpDef_ArgDef;
} // namespace ov_tensorflow
} // namespace tensorflow
namespace ov {
namespace frontend {
@ -21,18 +21,18 @@ namespace tensorflow {
class DecoderArgDef : public ov::frontend::tensorflow::DecoderBase {
public:
explicit DecoderArgDef(const ::ov_tensorflow::OpDef_ArgDef* arg_def,
const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def,
explicit DecoderArgDef(const ::tensorflow::OpDef_ArgDef* arg_def,
const std::shared_ptr<::tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::tensorflow::FunctionDef>& func_def,
const std::string& op_type)
: m_arg_def(arg_def),
m_graph_def(graph_def),
m_func_def(func_def),
m_op_type(op_type) {}
explicit DecoderArgDef(const ::ov_tensorflow::OpDef_ArgDef* arg_def,
const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def,
explicit DecoderArgDef(const ::tensorflow::OpDef_ArgDef* arg_def,
const std::shared_ptr<::tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::tensorflow::FunctionDef>& func_def,
const std::string& op_type,
const std::string& producer_name)
: m_arg_def(arg_def),
@ -55,13 +55,13 @@ public:
const std::string& get_op_name() const override;
private:
const ::ov_tensorflow::OpDef_ArgDef* m_arg_def;
const ::tensorflow::OpDef_ArgDef* m_arg_def;
// For existence of OpDef_ArgDef object corresponding to the main graph node,
// GraphDef object must live in the memory
const std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def;
const std::shared_ptr<::tensorflow::GraphDef> m_graph_def;
// For existence of OpDef_ArgDef object corresponding to the body graph node,
// both GraphDef and FunctionDef objects must be alive in the memory
const std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def;
const std::shared_ptr<::tensorflow::FunctionDef> m_func_def;
const std::string m_op_type;
const std::string m_producer_name;
};

View File

@ -4,12 +4,12 @@
#include "decoder_proto.hpp"
#include "attr_value.pb.h"
#include "node_def.pb.h"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "openvino/frontend/tensorflow/special_types.hpp"
#include "ov_tensorflow/attr_value.pb.h"
#include "ov_tensorflow/node_def.pb.h"
#include "ov_tensorflow/types.pb.h"
#include "tf_utils.hpp"
#include "types.pb.h"
namespace ov {
namespace frontend {
@ -38,7 +38,7 @@ void extract_tensor_content(const std::string& tensor_content, ov::Tensor* value
# pragma warning(disable : 4267) // possible loss of data
#endif
template <typename T>
void extract_compressed_tensor_content(const ::ov_tensorflow::TensorProto& tensor_proto,
void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_proto,
int64_t val_size,
ov::Tensor* values) {
auto val_lastsaved = static_cast<T>(0);
@ -90,15 +90,15 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
}
switch (attrs[0].value_case()) {
case ::ov_tensorflow::AttrValue::ValueCase::kB:
case ::tensorflow::AttrValue::ValueCase::kB:
return attrs[0].b();
case ::ov_tensorflow::AttrValue::ValueCase::kF:
case ::tensorflow::AttrValue::ValueCase::kF:
return attrs[0].f();
case ::ov_tensorflow::AttrValue::ValueCase::kS:
case ::tensorflow::AttrValue::ValueCase::kS:
return attrs[0].s();
case ::ov_tensorflow::AttrValue::ValueCase::kI:
case ::tensorflow::AttrValue::ValueCase::kI:
return attrs[0].i();
case ::ov_tensorflow::AttrValue::ValueCase::kShape: {
case ::tensorflow::AttrValue::ValueCase::kShape: {
const auto& tf_shape = attrs[0].shape();
if (tf_shape.unknown_rank()) {
return ov::PartialShape::dynamic();
@ -111,16 +111,16 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
return ov::PartialShape(dims);
}
case ::ov_tensorflow::AttrValue::ValueCase::kType: {
case ::tensorflow::AttrValue::ValueCase::kType: {
auto atype = attrs[0].type();
if (atype != ::ov_tensorflow::DT_STRING) {
if (atype != ::tensorflow::DT_STRING) {
return get_ov_type(attrs[0].type());
} else {
return ov::Any("DT_STRING");
}
}
case ::ov_tensorflow::AttrValue::ValueCase::kList: {
case ::tensorflow::AttrValue::ValueCase::kList: {
const auto& list = attrs[0].list();
if (list.i_size())
return std::vector<int64_t>(list.i().begin(), list.i().end());
@ -156,7 +156,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
if (list.type_size()) {
std::vector<ov::element::Type> res;
for (int idx = 0; idx < list.type_size(); ++idx) {
if (list.type(idx) != ::ov_tensorflow::DataType::DT_STRING) {
if (list.type(idx) != ::tensorflow::DataType::DT_STRING) {
res.emplace_back(get_ov_type(list.type(idx)));
} else {
res.emplace_back(ov::element::dynamic);
@ -176,15 +176,15 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
return EmptyList();
}
case ::ov_tensorflow::AttrValue::ValueCase::kTensor: {
case ::tensorflow::AttrValue::ValueCase::kTensor: {
return unpack_tensor_proto(attrs[0].tensor());
}
case ::ov_tensorflow::AttrValue::ValueCase::kPlaceholder:
case ::tensorflow::AttrValue::ValueCase::kPlaceholder:
FRONT_END_GENERAL_CHECK(false,
"Conversion from Tensorflow to OpenVINO data type failed: Placeholder type for '",
name,
"' attribute is not supported.");
case ::ov_tensorflow::AttrValue::ValueCase::kFunc:
case ::tensorflow::AttrValue::ValueCase::kFunc:
// attrs[0].func() returns NameAttrList object from which
// we retrieve the function name
// Further, InputModel object is created for FunctionDef with this name
@ -251,7 +251,7 @@ const std::string& DecoderProto::get_op_name() const {
return m_node_def->name();
}
std::vector<::ov_tensorflow::AttrValue> DecoderProto::decode_attribute_helper(const std::string& name) const {
std::vector<::tensorflow::AttrValue> DecoderProto::decode_attribute_helper(const std::string& name) const {
auto attr_map = m_node_def->attr();
if (attr_map.contains(name)) {
auto value = m_node_def->attr().at(name);

View File

@ -9,14 +9,14 @@
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/tensorflow/decoder.hpp"
#include "types.pb.h"
#include "ov_tensorflow/types.pb.h"
namespace ov_tensorflow {
namespace tensorflow {
class GraphDef;
class FunctionDef;
class NodeDef;
class AttrValue;
} // namespace ov_tensorflow
} // namespace tensorflow
namespace ov {
namespace frontend {
@ -29,15 +29,15 @@ void parse_producer_name(const std::string& producer_port_name,
class DecoderProto : public ov::frontend::tensorflow::DecoderBase {
public:
explicit DecoderProto(const ::ov_tensorflow::NodeDef* node_def,
const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def)
explicit DecoderProto(const ::tensorflow::NodeDef* node_def,
const std::shared_ptr<::tensorflow::GraphDef>& graph_def)
: m_node_def(node_def),
m_graph_def(graph_def),
m_func_def(nullptr) {}
explicit DecoderProto(const ::ov_tensorflow::NodeDef* node_def,
const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def)
explicit DecoderProto(const ::tensorflow::NodeDef* node_def,
const std::shared_ptr<::tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::tensorflow::FunctionDef>& func_def)
: m_node_def(node_def),
m_graph_def(graph_def),
m_func_def(func_def) {}
@ -56,14 +56,14 @@ public:
const std::string& get_op_name() const override;
private:
std::vector<::ov_tensorflow::AttrValue> decode_attribute_helper(const std::string& name) const;
const ::ov_tensorflow::NodeDef* m_node_def;
std::vector<::tensorflow::AttrValue> decode_attribute_helper(const std::string& name) const;
const ::tensorflow::NodeDef* m_node_def;
// For existence of NodeDef object corresponding to the main graph node,
// GraphDef object must live in the memory
const std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def;
const std::shared_ptr<::tensorflow::GraphDef> m_graph_def;
// For existence of NodeDef object corresponding to the body graph node,
// both GraphDef and FunctionDef objects must be alive in the memory
const std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def;
const std::shared_ptr<::tensorflow::FunctionDef> m_func_def;
};
} // namespace tensorflow
} // namespace frontend

View File

@ -10,26 +10,26 @@
#include <string>
#include "openvino/core/type/element_type.hpp"
#include "tensor_bundle.pb.h"
#include "trackable_object_graph.pb.h"
#include "ov_tensorflow/tensor_bundle.pb.h"
#include "ov_tensorflow/trackable_object_graph.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
bool GraphIteratorMeta::is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const {
const std::map<::ov_tensorflow::DataType, ov::element::Type> types{
{::ov_tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::ov_tensorflow::DataType::DT_INT16, ov::element::i16},
{::ov_tensorflow::DataType::DT_INT32, ov::element::i32},
{::ov_tensorflow::DataType::DT_INT64, ov::element::i64},
{::ov_tensorflow::DataType::DT_HALF, ov::element::f16},
{::ov_tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::ov_tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::ov_tensorflow::DataType::DT_UINT8, ov::element::u8},
{::ov_tensorflow::DataType::DT_INT8, ov::element::i8},
{::ov_tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::ov_tensorflow::DataType::DT_STRING, ov::element::dynamic}};
bool GraphIteratorMeta::is_valid_signature(const ::tensorflow::SignatureDef& signature) const {
const std::map<::tensorflow::DataType, ov::element::Type> types{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::tensorflow::DataType::DT_STRING, ov::element::dynamic}};
for (const auto& it : signature.inputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())

View File

@ -27,7 +27,7 @@ std::basic_string<wchar_t> get_variables_index_name<wchar_t>(const std::wstring
// Loads graph from Tensorflow MetaGraph file (*.meta)
class GraphIteratorMeta : public GraphIteratorProto {
std::shared_ptr<::ov_tensorflow::MetaGraphDef> m_metagraph_def;
std::shared_ptr<::tensorflow::MetaGraphDef> m_metagraph_def;
std::shared_ptr<VariablesIndex> m_variables_index;
std::shared_ptr<std::map<std::string, std::string>> m_inputs_map;
std::shared_ptr<std::map<std::string, std::string>> m_outputs_map;
@ -36,7 +36,7 @@ class GraphIteratorMeta : public GraphIteratorProto {
public:
template <typename T>
GraphIteratorMeta(const std::basic_string<T>& path, const bool mmap_enabled)
: m_metagraph_def(std::make_shared<::ov_tensorflow::MetaGraphDef>()),
: m_metagraph_def(std::make_shared<::tensorflow::MetaGraphDef>()),
m_mmap_enabled(mmap_enabled) {
this->read_meta(path);
}
@ -45,7 +45,7 @@ public:
static bool is_supported(const std::basic_string<T>& path) {
try {
std::ifstream mg_stream(path.c_str(), std::ios::in | std::ifstream::binary);
auto metagraph_def = std::make_shared<::ov_tensorflow::MetaGraphDef>();
auto metagraph_def = std::make_shared<::tensorflow::MetaGraphDef>();
return mg_stream && mg_stream.is_open() && metagraph_def->ParsePartialFromIstream(&mg_stream) &&
metagraph_def->has_graph_def() && metagraph_def->graph_def().node_size() > 0;
} catch (...) {
@ -66,7 +66,7 @@ public:
}
private:
bool is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const;
bool is_valid_signature(const ::tensorflow::SignatureDef& signature) const;
template <typename T>
bool read_meta(const std::basic_string<T>& path) {
@ -87,10 +87,10 @@ private:
bool res = m_metagraph_def->ParseFromIstream(&mg_stream);
FRONT_END_GENERAL_CHECK(res && m_metagraph_def->has_graph_def(), "MetaGraph cannot be parsed");
std::map<std::string, const ::ov_tensorflow::SignatureDef*> validSignatures = {};
std::map<std::string, const ::tensorflow::SignatureDef*> validSignatures = {};
for (const auto& sit : m_metagraph_def->signature_def()) {
const std::string& key = sit.first;
const ::ov_tensorflow::SignatureDef& val = sit.second;
const ::tensorflow::SignatureDef& val = sit.second;
if (is_valid_signature(val)) {
validSignatures[key] = &val;
}
@ -114,7 +114,7 @@ private:
}
}
m_graph_def = std::make_shared<::ov_tensorflow::GraphDef>(m_metagraph_def->graph_def());
m_graph_def = std::make_shared<::tensorflow::GraphDef>(m_metagraph_def->graph_def());
// Update variables map using information by resolving AssignVariableOp graph nodes
std::map<std::string, std::string> var_map;

View File

@ -10,10 +10,10 @@
#include "checkpoint_v1_reader.hpp"
#include "decoder_argdef.hpp"
#include "decoder_proto.hpp"
#include "graph.pb.h"
#include "openvino/frontend/exception.hpp"
#include "openvino/frontend/graph_iterator.hpp"
#include "openvino/frontend/tensorflow/decoder.hpp"
#include "ov_tensorflow/graph.pb.h"
namespace ov {
namespace frontend {
@ -21,8 +21,8 @@ namespace tensorflow {
class GraphIteratorProto : public GraphIterator {
protected:
std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def;
std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def;
std::shared_ptr<::tensorflow::GraphDef> m_graph_def;
std::shared_ptr<::tensorflow::FunctionDef> m_func_def;
std::shared_ptr<CheckpointV1Reader> m_checkpoint_v1_reader;
size_t node_index = 0;
@ -32,7 +32,7 @@ protected:
std::vector<std::string> m_output_names;
GraphIteratorProto()
: m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()),
: m_graph_def(std::make_shared<::tensorflow::GraphDef>()),
m_func_def(nullptr),
m_checkpoint_v1_reader(nullptr),
m_library_map() {}
@ -62,8 +62,8 @@ protected:
}
public:
GraphIteratorProto(const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def,
GraphIteratorProto(const std::shared_ptr<::tensorflow::GraphDef>& graph_def,
const std::shared_ptr<::tensorflow::FunctionDef>& func_def,
const std::unordered_map<std::string, int>& library_map,
const std::shared_ptr<CheckpointV1Reader> checkpoint_v1_reader)
: m_graph_def(graph_def),
@ -105,7 +105,7 @@ public:
/// \brief Construct GraphIterator for the frozen model without v1 checkpoints
template <typename T>
GraphIteratorProto(const std::basic_string<T>& model_path)
: m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()),
: m_graph_def(std::make_shared<::tensorflow::GraphDef>()),
m_func_def(nullptr),
m_checkpoint_v1_reader(nullptr) {
std::ifstream pb_stream(model_path, std::ios::in | std::ifstream::binary);
@ -119,7 +119,7 @@ public:
/// \brief Construct GraphIterator for the frozen model with v1 checkpoints
template <typename T>
GraphIteratorProto(const std::basic_string<T>& model_path, const std::basic_string<T>& checkpoint_directory)
: m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()),
: m_graph_def(std::make_shared<::tensorflow::GraphDef>()),
m_func_def(nullptr),
m_checkpoint_v1_reader(nullptr) {
std::ifstream pb_stream(model_path, std::ios::in | std::ifstream::binary);
@ -136,7 +136,7 @@ public:
static bool is_supported(const std::basic_string<T>& path) {
try {
std::ifstream pb_stream(path, std::ios::in | std::ifstream::binary);
auto graph_def = std::make_shared<::ov_tensorflow::GraphDef>();
auto graph_def = std::make_shared<::tensorflow::GraphDef>();
return pb_stream && pb_stream.is_open() && graph_def->ParsePartialFromIstream(&pb_stream) &&
graph_def->node_size() > 0;
} catch (...) {
@ -184,7 +184,7 @@ public:
"[TensorFlow Error] Internal Error: incorrect library map to cache function indices by names.");
auto func = m_graph_def->library().function(func_ind);
auto func_ptr = std::make_shared<::ov_tensorflow::FunctionDef>(func);
auto func_ptr = std::make_shared<::tensorflow::FunctionDef>(func);
return std::make_shared<GraphIteratorProto>(m_graph_def, func_ptr, m_library_map, m_checkpoint_v1_reader);
}

View File

@ -62,7 +62,7 @@ public:
if (!input_stream) {
return false;
}
auto graph_def = std::make_shared<::ov_tensorflow::GraphDef>();
auto graph_def = std::make_shared<::tensorflow::GraphDef>();
auto is_parsed = ::google::protobuf::TextFormat::Parse(input_stream.get(), graph_def.get()) && graph_def &&
graph_def->node_size() > 0;
return is_parsed;

View File

@ -10,26 +10,26 @@
#include <string>
#include "openvino/core/type/element_type.hpp"
#include "tensor_bundle.pb.h"
#include "trackable_object_graph.pb.h"
#include "ov_tensorflow/tensor_bundle.pb.h"
#include "ov_tensorflow/trackable_object_graph.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
bool GraphIteratorSavedModel::is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const {
const std::map<::ov_tensorflow::DataType, ov::element::Type> types{
{::ov_tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::ov_tensorflow::DataType::DT_INT16, ov::element::i16},
{::ov_tensorflow::DataType::DT_INT32, ov::element::i32},
{::ov_tensorflow::DataType::DT_INT64, ov::element::i64},
{::ov_tensorflow::DataType::DT_HALF, ov::element::f16},
{::ov_tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::ov_tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::ov_tensorflow::DataType::DT_UINT8, ov::element::u8},
{::ov_tensorflow::DataType::DT_INT8, ov::element::i8},
{::ov_tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::ov_tensorflow::DataType::DT_STRING, ov::element::dynamic}};
bool GraphIteratorSavedModel::is_valid_signature(const ::tensorflow::SignatureDef& signature) const {
const std::map<::tensorflow::DataType, ov::element::Type> types{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::tensorflow::DataType::DT_STRING, ov::element::dynamic}};
for (const auto& it : signature.inputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())

View File

@ -8,7 +8,7 @@
#include "graph_iterator_proto.hpp"
#include "openvino/util/file_util.hpp"
#include "saved_model.pb.h"
#include "ov_tensorflow/saved_model.pb.h"
#include "variables_index.hpp"
namespace ov {
@ -34,7 +34,7 @@ std::basic_string<wchar_t> get_variables_index_name<wchar_t>();
// Loads graph from Tensorflow Saved Model file (saved_model.pb)
class GraphIteratorSavedModel : public GraphIteratorProto {
std::shared_ptr<::ov_tensorflow::SavedModel> m_saved_model;
std::shared_ptr<::tensorflow::SavedModel> m_saved_model;
std::shared_ptr<VariablesIndex> m_variables_index;
std::shared_ptr<std::map<std::string, std::string>> m_inputs_map;
std::shared_ptr<std::map<std::string, std::string>> m_outputs_map;
@ -43,7 +43,7 @@ class GraphIteratorSavedModel : public GraphIteratorProto {
public:
template <typename T>
GraphIteratorSavedModel(const std::basic_string<T>& path, const std::string& tags, const bool mmap_enabled)
: m_saved_model(std::make_shared<::ov_tensorflow::SavedModel>()),
: m_saved_model(std::make_shared<::tensorflow::SavedModel>()),
m_mmap_enabled(mmap_enabled) {
this->read_saved_model(path, tags);
}
@ -66,7 +66,7 @@ public:
}
private:
bool is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const;
bool is_valid_signature(const ::tensorflow::SignatureDef& signature) const;
template <typename T>
bool read_saved_model(const std::basic_string<T>& path, const std::string& tags) {
@ -141,11 +141,11 @@ private:
}
/// \brief Does a loading of exact meta-graph
bool load_meta_graph(const ::ov_tensorflow::MetaGraphDef& meta_graph) {
std::map<std::string, const ::ov_tensorflow::SignatureDef*> validSignatures = {};
bool load_meta_graph(const ::tensorflow::MetaGraphDef& meta_graph) {
std::map<std::string, const ::tensorflow::SignatureDef*> validSignatures = {};
for (const auto& sit : meta_graph.signature_def()) {
const std::string& key = sit.first;
const ::ov_tensorflow::SignatureDef& val = sit.second;
const ::tensorflow::SignatureDef& val = sit.second;
if (is_valid_signature(val)) {
validSignatures[key] = &val;
}
@ -167,7 +167,7 @@ private:
}
}
m_graph_def = std::make_shared<::ov_tensorflow::GraphDef>(meta_graph.graph_def());
m_graph_def = std::make_shared<::tensorflow::GraphDef>(meta_graph.graph_def());
// Update variables map using information by resolving AssignVariableOp graph nodes
std::map<std::string, std::string> var_map;

View File

@ -10,7 +10,7 @@
#include "ngraph/runtime/shared_buffer.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/util/mmap_object.hpp"
#include "tensor_bundle.pb.h"
#include "ov_tensorflow/tensor_bundle.pb.h"
using namespace std;
using namespace ov::opset8;
@ -26,7 +26,7 @@ template <typename T>
static std::shared_ptr<ov::Node> read_variable(std::shared_ptr<VariablesIndex> var_index,
const ov::element::Type ov_type,
const ov::Shape shape,
const ::ov_tensorflow::BundleEntryProto& entry,
const ::tensorflow::BundleEntryProto& entry,
const NodeContext& node) {
google::protobuf::int64 size = 1;
for (uint64_t i = 0; i < shape.size(); ++i) {
@ -95,7 +95,7 @@ OutputVector translate_varhandle_op(const NodeContext& node) {
TENSORFLOW_OP_VALIDATION(node, result, "[TensorFlow Frontend] Internal error: Cannot find requested variable.");
::ov_tensorflow::BundleEntryProto entry;
::tensorflow::BundleEntryProto entry;
TENSORFLOW_OP_VALIDATION(node,
entry.ParseFromArray(entry_data, static_cast<int>(entry_size)),
"[TensorFlow Frontend] Internal error: Cannot get read bundle entry.");

View File

@ -14,13 +14,13 @@
#include "openvino/op/shape_of.hpp"
#include "openvino/op/slice.hpp"
#include "openvino/op/transpose.hpp"
#include "ov_tensorflow/xla_data.pb.h"
#include "utils.hpp"
#include "xla_data.pb.h"
using namespace std;
using namespace ov;
using namespace ov::op;
using namespace ov_xla;
using namespace xla;
namespace ov {
namespace frontend {

View File

@ -13,8 +13,8 @@
#include "openvino/op/shape_of.hpp"
#include "openvino/op/transpose.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "ov_tensorflow/xla_data.pb.h"
#include "utils.hpp"
#include "xla_data.pb.h"
using namespace std;
using namespace ov;
@ -92,7 +92,7 @@ OutputVector translate_xla_dot_op(const NodeContext& node) {
auto rhs = node.get_input(1);
auto node_name = node.get_name();
auto dimension_numbers_message = node.get_attribute<string>("dimension_numbers");
::ov_xla::DotDimensionNumbers dimension_numbers;
::xla::DotDimensionNumbers dimension_numbers;
TENSORFLOW_OP_VALIDATION(
node,
dimension_numbers.ParseFromArray(dimension_numbers_message.data(),

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "AllocationDescriptionProtos";

View File

@ -15,13 +15,13 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "ApiDefProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto";
import "attr_value.proto";
import "ov_tensorflow/attr_value.proto";
// Used to specify and override the default API & behavior in the
// generated code for client languages, from what you would get from

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor.proto";
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/tensor.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "AttrValueProtos";

View File

@ -12,10 +12,10 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "CostGraphProtos";

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow.data;
package tensorflow.data;
// Represents the type of auto-sharding we enable.
enum AutoShardPolicy {

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "DeviceAttributesProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "attr_value.proto";
import "node_def.proto";
import "op_def.proto";
import "ov_tensorflow/attr_value.proto";
import "ov_tensorflow/node_def.proto";
import "ov_tensorflow/op_def.proto";
option cc_enable_arenas = true;
option java_outer_classname = "FunctionProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "function.proto";
import "node_def.proto";
import "versions.proto";
import "ov_tensorflow/function.proto";
import "ov_tensorflow/node_def.proto";
import "ov_tensorflow/versions.proto";
option cc_enable_arenas = true;
option java_outer_classname = "GraphProtos";

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "types.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "GraphTransferInfoProto";

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "attr_value.proto";
import "ov_tensorflow/attr_value.proto";
option cc_enable_arenas = true;
option java_outer_classname = "KernelDefProtos";

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor_description.proto";
import "ov_tensorflow/tensor_description.proto";
option cc_enable_arenas = true;
option java_outer_classname = "LogMemoryProtos";

View File

@ -12,16 +12,16 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "google/protobuf/any.proto";
import "graph.proto";
import "op_def.proto";
import "tensor_shape.proto";
import "types.proto";
import "saved_object_graph.proto";
import "saver.proto";
import "struct.proto";
import "ov_tensorflow/graph.proto";
import "ov_tensorflow/op_def.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
import "ov_tensorflow/saved_object_graph.proto";
import "ov_tensorflow/saver.proto";
import "ov_tensorflow/struct.proto";
option cc_enable_arenas = true;
option java_outer_classname = "MetaGraphProtos";

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow.data.model;
package tensorflow.data.model;
option cc_enable_arenas = true;

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "attr_value.proto";
import "ov_tensorflow/attr_value.proto";
option cc_enable_arenas = true;
option java_outer_classname = "NodeProto";

View File

@ -12,15 +12,16 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "OpDefProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto";
import "attr_value.proto";
import "types.proto";
import "resource_handle.proto";
import "ov_tensorflow/attr_value.proto";
import "ov_tensorflow/types.proto";
import "ov_tensorflow/resource_handle.proto";
// Defines an operation. A NodeDef in a GraphDef specifies an Op by
// using the "op" field which should match the name of a OpDef.

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "ReaderBaseProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "graph.proto";
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/graph.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "RemoteFusedGraphExecuteInfoProto";

View File

@ -12,10 +12,10 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "ResourceHandle";

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "meta_graph.proto";
import "ov_tensorflow/meta_graph.proto";
option cc_enable_arenas = true;
option java_outer_classname = "SavedModelProtos";

View File

@ -12,15 +12,15 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "google/protobuf/any.proto";
import "tensor_shape.proto";
import "types.proto";
import "variable.proto";
import "versions.proto";
import "struct.proto";
import "trackable_object_graph.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
import "ov_tensorflow/variable.proto";
import "ov_tensorflow/versions.proto";
import "ov_tensorflow/struct.proto";
import "ov_tensorflow/trackable_object_graph.proto";
option cc_enable_arenas = true;
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";

View File

@ -29,17 +29,17 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "SavedTensorSliceProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.util";
import "tensor_shape.proto";
import "tensor_slice.proto";
import "tensor.proto";
import "types.proto";
import "versions.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/tensor_slice.proto";
import "ov_tensorflow/tensor.proto";
import "ov_tensorflow/types.proto";
import "ov_tensorflow/versions.proto";
// Metadata describing the set of slices of the same tensor saved in a
// checkpoint file.

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "SaverProtos";

View File

@ -12,10 +12,10 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "allocation_description.proto";
import "tensor_description.proto";
import "ov_tensorflow/allocation_description.proto";
import "ov_tensorflow/tensor_description.proto";
option cc_enable_arenas = true;
option java_outer_classname = "StepStatsProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor.proto";
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/tensor.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
@ -66,9 +66,9 @@ message StructuredValue {
bool bool_value = 14;
// Represents a TensorShape.
ov_tensorflow.TensorShapeProto tensor_shape_value = 31;
tensorflow.TensorShapeProto tensor_shape_value = 31;
// Represents an enum value for dtype.
ov_tensorflow.DataType tensor_dtype_value = 32;
tensorflow.DataType tensor_dtype_value = 32;
// Represents a value for tf.TensorSpec.
TensorSpecProto tensor_spec_value = 33;
// Represents a value for tf.TypeSpec.
@ -121,17 +121,17 @@ message NamedTupleValue {
// A protobuf to represent tf.TensorSpec.
message TensorSpecProto {
string name = 1;
ov_tensorflow.TensorShapeProto shape = 2;
ov_tensorflow.DataType dtype = 3;
tensorflow.TensorShapeProto shape = 2;
tensorflow.DataType dtype = 3;
}
// A protobuf to represent tf.BoundedTensorSpec.
message BoundedTensorSpecProto {
string name = 1;
ov_tensorflow.TensorShapeProto shape = 2;
ov_tensorflow.DataType dtype = 3;
ov_tensorflow.TensorProto minimum = 4;
ov_tensorflow.TensorProto maximum = 5;
tensorflow.TensorShapeProto shape = 2;
tensorflow.DataType dtype = 3;
tensorflow.TensorProto minimum = 4;
tensorflow.TensorProto maximum = 5;
}
// Represents a tf.TypeSpec

View File

@ -12,9 +12,9 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor.proto";
import "ov_tensorflow/tensor.proto";
option cc_enable_arenas = true;
option java_outer_classname = "SummaryProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "resource_handle.proto";
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/resource_handle.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "TensorProtos";

View File

@ -12,12 +12,12 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "tensor_shape.proto";
import "tensor_slice.proto";
import "types.proto";
import "versions.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/tensor_slice.proto";
import "ov_tensorflow/types.proto";
import "ov_tensorflow/versions.proto";
option cc_enable_arenas = true;
option java_outer_classname = "TensorBundleProtos";

View File

@ -12,11 +12,11 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "allocation_description.proto";
import "tensor_shape.proto";
import "types.proto";
import "ov_tensorflow/allocation_description.proto";
import "ov_tensorflow/tensor_shape.proto";
import "ov_tensorflow/types.proto";
option cc_enable_arenas = true;
option java_outer_classname = "TensorDescriptionProtos";

View File

@ -19,7 +19,7 @@ option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto";
package ov_tensorflow;
package tensorflow;
// Dimensions of a tensor.
message TensorShapeProto {

View File

@ -14,7 +14,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "TensorSliceProtos";

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
import "google/protobuf/wrappers.proto";

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "TypesProtos";
option java_multiple_files = true;

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "VariableProtos";

View File

@ -12,7 +12,7 @@ limitations under the License.*/
syntax = "proto3";
package ov_tensorflow;
package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "VersionsProtos";

View File

@ -15,7 +15,7 @@ limitations under the License.
syntax = "proto3";
package ov_xla;
package xla;
option cc_enable_arenas = true;

View File

@ -83,7 +83,7 @@ void extract_tensor_content(const string& tensor_content, Tensor* values) {
# pragma warning(disable : 4267) // possible loss of data
#endif
template <typename T>
void extract_compressed_tensor_content(const ::ov_tensorflow::TensorProto& tensor_proto,
void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_proto,
int64_t val_size,
Tensor* values) {
auto val_lastsaved = static_cast<T>(0);
@ -149,30 +149,30 @@ bool CfMarkerType::is_copyable() const {
return false;
}
Type get_ov_type(const ::ov_tensorflow::DataType& type) {
static const map<::ov_tensorflow::DataType, Type> type_map{{::ov_tensorflow::DataType::DT_BOOL, boolean},
{::ov_tensorflow::DataType::DT_INT16, i16},
{::ov_tensorflow::DataType::DT_INT32, i32},
{::ov_tensorflow::DataType::DT_INT64, i64},
{::ov_tensorflow::DataType::DT_HALF, f16},
{::ov_tensorflow::DataType::DT_FLOAT, f32},
{::ov_tensorflow::DataType::DT_DOUBLE, f64},
{::ov_tensorflow::DataType::DT_UINT8, u8},
{::ov_tensorflow::DataType::DT_INT8, i8},
{::ov_tensorflow::DataType::DT_BFLOAT16, bf16}};
Type get_ov_type(const ::tensorflow::DataType& type) {
static const map<::tensorflow::DataType, Type> type_map{{::tensorflow::DataType::DT_BOOL, boolean},
{::tensorflow::DataType::DT_INT16, i16},
{::tensorflow::DataType::DT_INT32, i32},
{::tensorflow::DataType::DT_INT64, i64},
{::tensorflow::DataType::DT_HALF, f16},
{::tensorflow::DataType::DT_FLOAT, f32},
{::tensorflow::DataType::DT_DOUBLE, f64},
{::tensorflow::DataType::DT_UINT8, u8},
{::tensorflow::DataType::DT_INT8, i8},
{::tensorflow::DataType::DT_BFLOAT16, bf16}};
auto it = type_map.find(type);
// for all unsupported types return dynamic type
return it == type_map.end() ? dynamic : it->second;
}
Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto) {
Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto) {
return unpack_tensor_proto(tensor_proto, tensor_proto.tensor_shape(), tensor_proto.dtype());
}
Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto,
const ::ov_tensorflow::TensorShapeProto& tensor_shape,
const ::ov_tensorflow::DataType& tensor_type) {
Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto,
const ::tensorflow::TensorShapeProto& tensor_shape,
const ::tensorflow::DataType& tensor_type) {
PartialShape pshape;
for (int i = 0; i < tensor_shape.dim_size(); i++) {
pshape.push_back(tensor_shape.dim(i).size());
@ -180,7 +180,7 @@ Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto,
FRONT_END_GENERAL_CHECK(pshape.is_static(), "Dynamic shapes are not supported for Tensor attribute.");
Type ov_type = get_ov_type(tensor_type);
if (tensor_type != ::ov_tensorflow::DataType::DT_STRING) {
if (tensor_type != ::tensorflow::DataType::DT_STRING) {
FRONT_END_GENERAL_CHECK(
ov_type.is_static(),
"Encountered unknown element type " + DataType_Name(tensor_type) + " on an empty tensor_proto");

View File

@ -4,8 +4,6 @@
#pragma once
#include "attr_value.pb.h"
#include "node_def.pb.h"
#include "openvino/core/node.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/runtime_attribute.hpp"
@ -14,9 +12,11 @@
#include "openvino/frontend/node_context.hpp"
#include "openvino/op/loop.hpp"
#include "openvino/runtime/tensor.hpp"
#include "tensor.pb.h"
#include "tensor_shape.pb.h"
#include "types.pb.h"
#include "ov_tensorflow/attr_value.pb.h"
#include "ov_tensorflow/node_def.pb.h"
#include "ov_tensorflow/tensor.pb.h"
#include "ov_tensorflow/tensor_shape.pb.h"
#include "ov_tensorflow/types.pb.h"
namespace ov {
namespace frontend {
@ -24,13 +24,13 @@ namespace tensorflow {
#define CF_MARKER_TAG "tf_cf_marker_tag"
ov::element::Type get_ov_type(const ::ov_tensorflow::DataType& type);
ov::element::Type get_ov_type(const ::tensorflow::DataType& type);
ov::Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto);
ov::Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto);
ov::Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto,
const ::ov_tensorflow::TensorShapeProto& tensor_shape,
const ::ov_tensorflow::DataType& tensor_type);
ov::Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto,
const ::tensorflow::TensorShapeProto& tensor_shape,
const ::tensorflow::DataType& tensor_type);
class Switch;
using SetOfSwitchNodes = std::unordered_set<std::shared_ptr<Switch>>;

View File

@ -11,8 +11,8 @@
#include "graph_iterator_saved_model.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/util/mmap_object.hpp"
#include "tensor_bundle.pb.h"
#include "trackable_object_graph.pb.h"
#include "ov_tensorflow/tensor_bundle.pb.h"
#include "ov_tensorflow/trackable_object_graph.pb.h"
#ifdef ENABLE_SNAPPY_COMPRESSION
# include "snappy.h"
@ -126,7 +126,7 @@ void VariablesIndex::read_bundle_header() {
auto item = m_variables_index.find("");
FRONT_END_GENERAL_CHECK(item != m_variables_index.end(), "Bundle Header isn't found in index");
::ov_tensorflow::BundleHeaderProto bundleHeader;
::tensorflow::BundleHeaderProto bundleHeader;
FRONT_END_GENERAL_CHECK(bundleHeader.ParseFromArray(item->second.data(), static_cast<int>(item->second.size())),
"Bundle Header: Cannot parse Bundle Header");
FRONT_END_GENERAL_CHECK(bundleHeader.version().producer() == 1, "Bundle Header: Unsupported producer version");
@ -145,7 +145,7 @@ void VariablesIndex::read_checkpointable_object_graph() {
return;
}
::ov_tensorflow::BundleEntryProto entry;
::tensorflow::BundleEntryProto entry;
FRONT_END_GENERAL_CHECK(entry.ParseFromArray(item->second.data(), static_cast<int>(item->second.size())),
"CMO: Cannot parse Bundle Entry");
@ -155,7 +155,7 @@ void VariablesIndex::read_checkpointable_object_graph() {
FRONT_END_GENERAL_CHECK(shard != m_data_files.end(), "CMO: data files isn't found");
std::vector<char> data(entry.size());
::ov_tensorflow::TrackableObjectGraph tog;
::tensorflow::TrackableObjectGraph tog;
// TODO: have to understand this offset
// It looks like reinterpret_cast artifact
@ -244,13 +244,13 @@ bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring
struct PtrNode {
using SharedPtrNode = std::shared_ptr<PtrNode>;
const ::ov_tensorflow::NodeDef* node;
const ::tensorflow::NodeDef* node;
std::vector<SharedPtrNode> inputs;
std::vector<SharedPtrNode> outputs;
PtrNode() : node(nullptr), inputs(), outputs() {}
PtrNode(const ::ov_tensorflow::NodeDef& src_node) {
PtrNode(const ::tensorflow::NodeDef& src_node) {
node = &src_node;
}
@ -308,14 +308,14 @@ struct PtrNode {
}
};
static void read_stateful_partitioned_call(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def,
const ::ov_tensorflow::NodeDef& partCall,
static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
const ::tensorflow::NodeDef& partCall,
std::map<std::string, PtrNode::SharedPtrNode>& node_dictionary) {
FRONT_END_GENERAL_CHECK(partCall.op() == "StatefulPartitionedCall", "Passed node isn't StatefulPartitionedCall");
std::string func_name = partCall.attr().at("f").func().name();
const ::ov_tensorflow::FunctionDef* func_def = nullptr;
const ::tensorflow::FunctionDef* func_def = nullptr;
for (const auto& func : graph_def->library().function()) {
if (func.signature().name() == func_name) {
func_def = &func;
@ -365,7 +365,7 @@ static void read_stateful_partitioned_call(const std::shared_ptr<::ov_tensorflow
}
}
void VariablesIndex::map_assignvariable(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def,
void VariablesIndex::map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map) {
std::map<std::string, PtrNode::SharedPtrNode> nodes;

View File

@ -9,7 +9,7 @@
#include "graph_iterator_proto.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/mmap_object.hpp"
#include "saved_model.pb.h"
#include "ov_tensorflow/saved_model.pb.h"
namespace ov {
namespace frontend {
@ -139,7 +139,7 @@ public:
/// It needs to map VarHandleOp to right place in .index file.
/// \param[in] graph_def GraphDef object for analysis
/// \param[out] variables_map Map of variables found in graph_def
static void map_assignvariable(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def,
static void map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map);
private: