[TF FE] Added MetaGraph file format (#16524)

* Separated SavedModelVariablesIndex class from Saved Model

* Renamed SavedModelVariablesIndex class

* Enabled TensorFlow MetaGraph

* Covered VariableV2 and Assign nodes

* Applied review comments

* Added tests

* Added names to input/output ports too

* Fixed naming for use with MO

* Applied part of review comments

* Renamed meta.cpp and saved_model.cpp

* Applied shared_ptr for memory management of PtrNode

* Fixed CI

* Prevented cycles while traversing the graph

* Relaxed the requirement for a Checkpointable Object Graph

* Changed naming approach to align port order

* Changed renaming order (before reordering)

* Added a Placeholder translator which checks updated shape

* Worked around a missing Identity name

* Fixed CI and restored lost translators after rebase

* Workaround for output names

* Removed unused params after cutting a model

* Prevented a crash when VariableV2 appears in a frozen model

* Fixed saved model handling when no variables.index is found but variables exist

* Changed the approach for handling native format support

* Aligned behavior with freezing .meta files

* Fixed behavior for cutting a model by input tensor

* Applied review comments
This commit is contained in:
Georgy Krivoruchko 2023-04-25 13:46:06 +04:00 committed by GitHub
parent 9c01de4b6e
commit 3f07c8b48b
27 changed files with 1015 additions and 388 deletions
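
With this change the TensorFlow frontend deduces the MetaGraph format from the `.meta` extension, so a checkpoint can be loaded directly through the regular API. A minimal usage sketch, assuming an OpenVINO build with the TensorFlow frontend; the model path and device below are hypothetical:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Loads the MetaGraph directly; variables are read from the companion
    // model.index / model.data-* files placed next to the .meta file.
    std::shared_ptr<ov::Model> model = core.read_model("model.meta");
    auto compiled = core.compile_model(model, "CPU");
    return 0;
}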

View File

@ -176,7 +176,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
if (list.type(idx) != ::tensorflow::DataType::DT_STRING) {
res.emplace_back(get_ov_type(list.type(idx)));
} else {
res.emplace_back(ov::element::undefined);
res.emplace_back(ov::element::dynamic);
}
}
return res;
@ -208,13 +208,8 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
ov_type.is_static(),
"Encountered unknown element type " + DataType_Name(tf_type) + " on an empty tensor_proto");
} else {
ov_type = ov::element::u64;
pshape.resize(0);
pshape.push_back(tensor_proto.string_val_size());
}
if (tf_type == ::tensorflow::DataType::DT_STRING) {
auto data = std::vector<std::string>();
for (auto& item : tensor_proto.string_val()) {
for (const auto& item : tensor_proto.string_val()) {
data.push_back(item);
}
return data;

View File

@ -4,6 +4,7 @@
#include "openvino/frontend/tensorflow/frontend.hpp"
#include "graph_iterator_meta.hpp"
#include "graph_iterator_proto.hpp"
#include "graph_iterator_proto_txt.hpp"
#include "graph_iterator_saved_model.hpp"
@ -103,6 +104,8 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
return true;
} else if (GraphIteratorSavedModel::is_supported(model_path)) {
return true;
} else if (ov::util::ends_with(model_path, ".meta") && GraphIteratorMeta::is_supported(model_path)) {
return true;
} else if (GraphIteratorProtoTxt::is_supported(model_path)) {
// handle text protobuf format
return true;
@ -110,15 +113,17 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
else if (variants[0].is<std::wstring>()) {
std::wstring suffix = L".pb";
std::wstring model_path = variants[0].as<std::wstring>();
if (ov::util::ends_with(model_path, suffix) && GraphIteratorProto::is_supported(model_path)) {
if (ov::util::ends_with(model_path, std::wstring(L".pb")) && GraphIteratorProto::is_supported(model_path)) {
// handle binary protobuf format with a path in Unicode
// for automatic deduction of the frontend to convert the model
// we have more strict rule that is to have `.pb` extension in the path
return true;
} else if (GraphIteratorSavedModel::is_supported(model_path)) {
return true;
} else if (ov::util::ends_with(model_path, std::wstring(L".meta")) &&
GraphIteratorMeta::is_supported(model_path)) {
return true;
} else if (GraphIteratorProtoTxt::is_supported(model_path)) {
// handle text protobuf format
return true;
@ -157,7 +162,16 @@ ov::frontend::InputModel::Ptr FrontEnd::load_impl(const std::vector<ov::Any>& va
m_telemetry,
graph_iterator->get_variables_index(),
graph_iterator->get_saved_model_input_names(),
graph_iterator->get_saved_model_output_names());
graph_iterator->get_saved_model_output_names(),
true);
} else if (GraphIteratorMeta::is_supported(model_path)) {
auto graph_iterator = std::make_shared<GraphIteratorMeta>(model_path);
return std::make_shared<InputModel>(graph_iterator,
m_telemetry,
graph_iterator->get_variables_index(),
graph_iterator->get_metagraph_input_names(),
graph_iterator->get_metagraph_output_names(),
true);
} else if (GraphIteratorProtoTxt::is_supported(model_path)) {
// handle text protobuf format
return std::make_shared<InputModel>(std::make_shared<GraphIteratorProtoTxt>(model_path), m_telemetry);
@ -182,7 +196,16 @@ ov::frontend::InputModel::Ptr FrontEnd::load_impl(const std::vector<ov::Any>& va
m_telemetry,
graph_iterator->get_variables_index(),
graph_iterator->get_saved_model_input_names(),
graph_iterator->get_saved_model_output_names());
graph_iterator->get_saved_model_output_names(),
true);
} else if (GraphIteratorMeta::is_supported(model_path)) {
auto graph_iterator = std::make_shared<GraphIteratorMeta>(model_path);
return std::make_shared<InputModel>(graph_iterator,
m_telemetry,
graph_iterator->get_variables_index(),
graph_iterator->get_metagraph_input_names(),
graph_iterator->get_metagraph_output_names(),
true);
} else if (GraphIteratorProtoTxt::is_supported(model_path)) {
// handle text protobuf format with a path in Unicode
return std::make_shared<InputModel>(std::make_shared<GraphIteratorProtoTxt>(model_path), m_telemetry);
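
For reference, a condensed sketch of the deduction order that supported_impl now implements; this is a simplification, not the literal code, and it reuses the iterator classes and ov::util::ends_with from the diff above:

// Order matters: ".pb" is matched by extension, SavedModel by directory layout,
// MetaGraph by the ".meta" extension plus a parse check, text protobuf last.
bool looks_like_tf_model(const std::string& model_path) {
    if (ov::util::ends_with(model_path, ".pb") && GraphIteratorProto::is_supported(model_path))
        return true;  // binary protobuf (frozen GraphDef)
    if (GraphIteratorSavedModel::is_supported(model_path))
        return true;  // SavedModel directory containing saved_model.pb
    if (ov::util::ends_with(model_path, ".meta") && GraphIteratorMeta::is_supported(model_path))
        return true;  // MetaGraph checkpoint, added by this commit
    return GraphIteratorProtoTxt::is_supported(model_path);  // text protobuf
}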

View File

@ -0,0 +1,73 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "graph_iterator_meta.hpp"
#include <stdlib.h>
#include <fstream>
#include <string>
#include "openvino/core/type/element_type.hpp"
#include "tensor_bundle.pb.h"
#include "trackable_object_graph.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
bool GraphIteratorMeta::is_valid_signature(const ::tensorflow::SignatureDef& signature) const {
const std::map<::tensorflow::DataType, ov::element::Type> types{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::tensorflow::DataType::DT_STRING, ov::element::dynamic}};
for (const auto& it : signature.inputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
for (const auto& it : signature.outputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
return true;
}
bool GraphIteratorMeta::is_supported(const std::string& path) {
std::ifstream mg_stream(path, std::ios::in | std::ifstream::binary);
auto metagraph_def = std::make_shared<::tensorflow::MetaGraphDef>();
return mg_stream && mg_stream.is_open() && metagraph_def->ParsePartialFromIstream(&mg_stream);
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
bool GraphIteratorMeta::is_supported(const std::wstring& path) {
std::ifstream mg_stream(path, std::ios::in | std::ifstream::binary);
auto metagraph_def = std::make_shared<::tensorflow::MetaGraphDef>();
return mg_stream && mg_stream.is_open() && metagraph_def->ParsePartialFromIstream(&mg_stream);
}
#endif
template <>
std::basic_string<char> get_variables_index_name<char>(const std::string name) {
return name + ".index";
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
template <>
std::basic_string<wchar_t> get_variables_index_name<wchar_t>(const std::wstring name) {
return name + L".index";
}
#endif
} // namespace tensorflow
} // namespace frontend
} // namespace ov
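
To illustrate the checkpoint layout this iterator expects, a small standalone sketch of how the companion file names are derived from the .meta path; the paths are hypothetical, and the shard suffix matches the naming used by VariablesIndex::read_variables later in this commit:

#include <cstdio>
#include <iostream>
#include <string>

int main() {
    std::string path = "ckpt/model.meta";                        // file passed to the frontend
    std::string model = path.substr(0, path.find_last_of('.'));  // "ckpt/model"
    std::cout << model + ".index" << "\n";                       // variables index (get_variables_index_name)
    char suffix[24];
    std::snprintf(suffix, sizeof(suffix), ".data-%05d-of-%05d", 0, 1);
    std::cout << model + suffix << "\n";                         // "ckpt/model.data-00000-of-00001"
    return 0;
}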

View File

@ -0,0 +1,126 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include "graph_iterator_proto.hpp"
#include "openvino/util/file_util.hpp"
#include "variables_index.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
template <typename T>
std::basic_string<T> get_variables_index_name(const std::basic_string<T> name) {}
template <>
std::basic_string<char> get_variables_index_name<char>(const std::string name);
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
template <>
std::basic_string<wchar_t> get_variables_index_name<wchar_t>(const std::wstring name);
#endif
// Loads a graph from a TensorFlow MetaGraph file (*.meta)
class GraphIteratorMeta : public GraphIteratorProto {
std::shared_ptr<::tensorflow::MetaGraphDef> m_metagraph_def;
std::shared_ptr<VariablesIndex> m_variables_index;
std::shared_ptr<std::map<std::string, std::string>> m_inputs_map;
std::shared_ptr<std::map<std::string, std::string>> m_outputs_map;
public:
template <typename T>
GraphIteratorMeta(const std::basic_string<T>& path)
: m_metagraph_def(std::make_shared<::tensorflow::MetaGraphDef>()) {
this->read_meta(path);
}
static bool is_supported(const std::string& path);
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
static bool is_supported(const std::wstring& path);
#endif
std::shared_ptr<VariablesIndex> get_variables_index() {
return m_variables_index;
}
std::shared_ptr<std::map<std::string, std::string>> get_metagraph_input_names() const {
return m_inputs_map;
}
std::shared_ptr<std::map<std::string, std::string>> get_metagraph_output_names() const {
return m_outputs_map;
}
private:
bool is_valid_signature(const ::tensorflow::SignatureDef& signature) const;
template <typename T>
bool read_meta(const std::basic_string<T>& path) {
std::basic_string<T> model_path = path.substr(0, path.find_last_of('.'));
std::ifstream mg_stream{path, std::ifstream::in | std::ifstream::binary};
FRONT_END_GENERAL_CHECK(mg_stream && mg_stream.is_open(), "Model file does not exist");
std::basic_string<T> varIndexPath = get_variables_index_name<T>(model_path);
if (ov::util::file_exists(varIndexPath)) {
m_variables_index = std::make_shared<VariablesIndex>();
std::ifstream vi_stream{varIndexPath, std::ifstream::in | std::ifstream::binary};
FRONT_END_GENERAL_CHECK(vi_stream && vi_stream.is_open(), "MetaGraph's variable index file does not exist");
FRONT_END_GENERAL_CHECK(m_variables_index->read_variables(vi_stream, model_path, false),
"MetaGraph's variable index file cannot be parsed");
}
bool res = m_metagraph_def->ParseFromIstream(&mg_stream);
FRONT_END_GENERAL_CHECK(res && m_metagraph_def->has_graph_def(), "MetaGraph cannot be parsed");
std::map<std::string, const ::tensorflow::SignatureDef*> validSignatures = {};
for (const auto& sit : m_metagraph_def->signature_def()) {
const std::string& key = sit.first;
const ::tensorflow::SignatureDef& val = sit.second;
if (is_valid_signature(val)) {
validSignatures[key] = &val;
}
}
auto serving_default = validSignatures.find("serving_default");
if (serving_default != validSignatures.end()) {
/*
The "serving_default" signature contains a map of input/output names.
Here we store two maps, one for inputs and one for outputs.
Each map entry looks like "internal_name:port" = "name_set_by_user".
For example, "serving_default_input_mask:0" = "input_mask"
*/
m_inputs_map = std::make_shared<std::map<std::string, std::string>>();
m_outputs_map = std::make_shared<std::map<std::string, std::string>>();
for (const auto& input : serving_default->second->inputs()) {
(*m_inputs_map)[input.second.name()] = input.first;
}
for (const auto& output : serving_default->second->outputs()) {
(*m_outputs_map)[output.second.name()] = output.first;
}
}
m_graph_def = std::make_shared<::tensorflow::GraphDef>(m_metagraph_def->graph_def());
// Update the variables map by resolving AssignVariableOp graph nodes
std::map<std::string, std::string> var_map;
VariablesIndex::map_assignvariable(m_graph_def, var_map);
for (auto var : var_map) {
m_variables_index->map_variable(var.first, var.second);
}
initialize_decoders_and_library();
return true;
}
}; // GraphIteratorMeta
} // namespace tensorflow
} // namespace frontend
} // namespace ov
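
To make the direction of these maps concrete, a toy sketch with hypothetical tensor names; the keys are internal "name:port" strings and the values are the user-facing signature names, matching the loops over serving_default above:

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> inputs_map{
        {"serving_default_input_mask:0", "input_mask"}};  // internal -> user-facing
    std::map<std::string, std::string> outputs_map{
        {"StatefulPartitionedCall:0", "logits"}};         // hypothetical output pair
    for (const auto& p : inputs_map)
        std::cout << p.first << " -> " << p.second << "\n";
    for (const auto& p : outputs_map)
        std::cout << p.first << " -> " << p.second << "\n";
    return 0;
}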

View File

@ -0,0 +1,77 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "graph_iterator_saved_model.hpp"
#include <stdlib.h>
#include <fstream>
#include <string>
#include "openvino/core/type/element_type.hpp"
#include "tensor_bundle.pb.h"
#include "trackable_object_graph.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
bool GraphIteratorSavedModel::is_valid_signature(const ::tensorflow::SignatureDef& signature) const {
const std::map<::tensorflow::DataType, ov::element::Type> types{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::tensorflow::DataType::DT_STRING, ov::element::dynamic}};
for (const auto& it : signature.inputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
for (const auto& it : signature.outputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
return true;
}
bool GraphIteratorSavedModel::is_supported(const std::string& path) {
return ov::util::directory_exists(path) && ov::util::file_exists(ov::util::path_join({path, "saved_model.pb"}));
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
bool GraphIteratorSavedModel::is_supported(const std::wstring& path) {
return ov::util::directory_exists(path) && ov::util::file_exists(ov::util::path_join_w({path, L"saved_model.pb"}));
}
#endif
template <>
std::basic_string<char> get_saved_model_name<char>() {
return "/saved_model.pb";
}
template <>
std::basic_string<char> get_variables_index_name<char>() {
return "/variables/variables.index";
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
template <>
std::basic_string<wchar_t> get_saved_model_name<wchar_t>() {
return L"/saved_model.pb";
}
template <>
std::basic_string<wchar_t> get_variables_index_name<wchar_t>() {
return L"/variables/variables.index";
}
#endif
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -9,13 +9,12 @@
#include "graph_iterator_proto.hpp"
#include "openvino/util/file_util.hpp"
#include "saved_model.pb.h"
#include "variables_index.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
struct VIBlock;
template <typename T>
std::basic_string<T> get_saved_model_name() {}
template <typename T>
@ -33,130 +32,10 @@ template <>
std::basic_string<wchar_t> get_variables_index_name<wchar_t>();
#endif
// Stores information about variables index
class SavedModelVariablesIndex {
// Contains the maximum number of shards, used for creating the correct extension
int32_t m_total_shards;
// Contains the BundleEntryProto variables list, read from the .index file
std::map<std::string, std::vector<char>> m_variables_index;
// List of opened data files for use with BundleEntryProto
std::map<int32_t, std::shared_ptr<std::ifstream>> m_data_files;
// List of mapped variables which can be read using TrackableObjectGraph
std::map<std::string, std::string> m_variables_map;
public:
/// \brief Reads variables from an opened variables index file. May trigger asserts in case of issues.
/// \param vi_stream Opened file stream; the file position doesn't matter, it will be rewound internally.
/// \param path A path to the file with variables data
/// \returns Returns true if everything loads successfully, false otherwise
bool read_variables(std::ifstream& vi_stream, const std::string& path);
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
/// \brief Reads variables from an opened variables index file. May trigger asserts in case of issues.
/// \param vi_stream Opened file stream; the file position doesn't matter, it will be rewound internally.
/// \param path A path to the file with variables data
/// \returns Returns true if everything loads successfully, false otherwise
bool read_variables(std::ifstream& vi_stream, const std::wstring& path);
#endif
/// \brief Returns the data pointer and data size of a stored variable
/// \param name Name of the variable
/// \param data Pointer to a pointer where the data pointer will be returned
/// \param size Pointer to a variable which will store the data size
/// \returns Returns true if the variable was found, false otherwise (data and size are left untouched)
bool get_variable(const std::string& name, const char** data, size_t* size) const {
auto varItem = m_variables_index.find(name);
if (varItem == m_variables_index.end()) {
return false;
}
if (data != nullptr) {
*data = varItem->second.data();
}
if (size != nullptr) {
*size = varItem->second.size();
}
return true;
}
/// \brief Returns the data pointer and data size of a variable mapped from the trackable object graph to the variables index
/// \param name Name of a mapped variable
/// \param data Pointer to a pointer where the data pointer will be returned
/// \param size Pointer to a variable which will store the data size
/// \returns Returns true if the variable was found, false otherwise (data and size are left untouched)
bool get_mapped_variable(const std::string& name, const char** data, size_t* size) const {
auto mapItem = m_variables_map.find(name);
if (mapItem == m_variables_map.end()) {
return false;
}
return get_variable(mapItem->second, data, size);
}
/// \brief Checks if a variable has a mapped pair
/// \param name Name of the variable to check for existence
/// \returns True if the variable has a mapped value, false otherwise
bool has_mapped_variable(const std::string& name) const {
auto mapItem = m_variables_map.find(name);
return mapItem != m_variables_map.end();
}
/// \brief Returns a shared pointer to the requested shard_id, or nullptr if the shard_id isn't found
/// \param shard_id Requested shard_id
/// \returns Valid shared_ptr with an ifstream, or with nullptr if the shard isn't found
std::shared_ptr<std::ifstream> get_data_file(const int32_t shard_id) const {
auto result = m_data_files.find(shard_id);
return result != m_data_files.end() ? result->second : nullptr;
}
/// \brief Adds a variable mapping to the variables map
/// \param var_name Variable full name (from the .index file)
/// \param map_name Mapped name
/// \param rewrite Rewrite the mapped value in case it exists
/// \returns True if the map was updated, false if nothing changed (the variable exists and rewrite is false)
bool map_variable(const std::string& var_name, const std::string& map_name, bool rewrite = false) {
if (m_variables_map.find(var_name) != m_variables_map.end() && rewrite == false) {
return false;
}
m_variables_map[var_name] = map_name;
return true;
}
private:
/// \brief Reads the block structure of the .index file
/// \param[in,out] fs File stream of the .index file; the position in the file will be updated
/// \param[in] index Variables index block which stores information about the block
/// \param[out] data Block data to be read
/// \param[out] offset Offset of the block start
/// \param[out] offset_end Offset of the block end
void read_variables_index_block(std::ifstream& fs,
const VIBlock& index,
std::vector<char>& data,
uint32_t& offset,
uint32_t& offset_end);
/// \brief Reads a key=value pair from the provided pointer
/// \param[in,out] ptr Actual pointer; will be moved to the end of the read pair (to read the next one)
/// \param[in] ptr_end End of memory which shouldn't be passed in case of a broken structure
/// \param[out] key Key name
/// \param[out] value Stored value for the key (not a pure string, a data block)
/// \param[out] val_length Length of the read value
void read_variables_index_pair(char*& ptr,
const char* ptr_end,
std::string& key,
char*& value,
uint32_t& val_length);
/// \brief Reads the .index file and stores the key=value map in the provided varIndex
/// \param[in,out] fs File stream to be parsed; the position in the file will be updated
/// \param[out] varIndex Variables index (key=value) from the given file stream
void read_variables_index(std::ifstream& fs, std::map<std::string, std::vector<char>>& varIndex);
/// \brief Reads the bundle header if it is available. Checks the version and saves info about the number of shards
void read_bundle_header();
/// \brief Reads the key=value map from the stored _CHECKPOINTABLE_OBJECT_GRAPH variable
void read_checkpointable_object_graph();
};
// Loads a graph from a TensorFlow Saved Model file (saved_model.pb)
class GraphIteratorSavedModel : public GraphIteratorProto {
std::shared_ptr<::tensorflow::SavedModel> m_saved_model;
std::shared_ptr<SavedModelVariablesIndex> m_variables_index;
std::shared_ptr<VariablesIndex> m_variables_index;
std::shared_ptr<std::map<std::string, std::string>> m_inputs_map;
std::shared_ptr<std::map<std::string, std::string>> m_outputs_map;
@ -172,7 +51,7 @@ public:
static bool is_supported(const std::wstring& path);
#endif
std::shared_ptr<SavedModelVariablesIndex> get_variables_index() {
std::shared_ptr<VariablesIndex> get_variables_index() {
return m_variables_index;
}
@ -194,7 +73,7 @@ private:
std::basic_string<T> varIndexPath = path + get_variables_index_name<T>();
if (ov::util::file_exists(varIndexPath)) {
m_variables_index = std::make_shared<SavedModelVariablesIndex>();
m_variables_index = std::make_shared<VariablesIndex>();
std::ifstream vi_stream{varIndexPath, std::ifstream::in | std::ifstream::binary};
FRONT_END_GENERAL_CHECK(vi_stream && vi_stream.is_open(),
"Saved Model's variable index file does not exist");
@ -232,6 +111,9 @@ private:
}
}
// MetaGraph may have a list of signatures, but at the moment we need information only about
// the "serving_default" signature, which contains the input/output names for the
// model. It may also be missing from a file.
auto serving_default = validSignatures.find("serving_default");
if (serving_default != validSignatures.end()) {
@ -249,24 +131,14 @@ private:
// Update the variables map by resolving AssignVariableOp graph nodes
std::map<std::string, std::string> var_map;
map_assignvariable(m_graph_def, var_map);
for (auto var : var_map) {
m_variables_index->map_variable(var.first, var.second);
VariablesIndex::map_assignvariable(m_graph_def, var_map);
if (var_map.size() > 0 && m_variables_index.get() != nullptr) {
for (auto var : var_map) {
m_variables_index->map_variable(var.first, var.second);
}
}
auto nodes_size = m_graph_def->node_size();
m_decoders.resize(static_cast<size_t>(nodes_size));
for (int node_ind = 0; node_ind < nodes_size; ++node_ind) {
m_decoders[node_ind] = std::make_shared<DecoderProto>(&m_graph_def->node(node_ind), m_graph_def);
}
// initialize a library map
auto num_funcs = m_graph_def->library().function_size();
for (int func_ind = 0; func_ind < num_funcs; ++func_ind) {
auto func = m_graph_def->library().function(func_ind);
auto func_name = func.signature().name();
m_library_map.insert(std::pair<std::string, int>(func_name, func_ind));
}
initialize_decoders_and_library();
return true;
}
@ -275,15 +147,6 @@ private:
return false;
}
/// \brief Reads the relationship between VarHandleOp - RestoreV2 - AssignVariableOp and
/// stores it in a provided key=value map, where the key is the name of a VarHandleOp and
/// the value is the long variable name stored in RestoreV2.
/// It is needed to map a VarHandleOp to the right place in the .index file.
/// \param[in] graph_def GraphDef object for analysis
/// \param[out] variables_map Map of variables found in graph_def
void map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map) const;
}; // GraphIteratorSavedModel
} // namespace tensorflow

View File

@ -56,9 +56,10 @@ public:
InputModelTFImpl(const GraphIterator::Ptr& graph_iterator,
const ov::frontend::InputModel& input_model,
const std::shared_ptr<TelemetryExtension>& telemetry,
const std::shared_ptr<SavedModelVariablesIndex>& variables_index,
const std::shared_ptr<VariablesIndex>& variables_index,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_input_names,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names);
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names,
const bool native_format = false);
std::vector<ov::frontend::Place::Ptr> get_inputs() const;
std::vector<ov::frontend::Place::Ptr> get_outputs() const;
ov::frontend::Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const;
@ -72,7 +73,7 @@ public:
ov::element::Type get_element_type(ov::frontend::Place::Ptr place) const;
void set_tensor_value(ov::frontend::Place::Ptr place, const void* value);
std::vector<std::shared_ptr<OpPlace>> get_op_places() const;
std::vector<std::shared_ptr<OpPlace>> get_op_places();
std::map<std::string, std::shared_ptr<TensorPlace>> get_tensor_places() const {
return m_tensor_places;
}
@ -82,13 +83,13 @@ public:
std::shared_ptr<InputModel> get_body_input_model(const std::string& body_model_name) const;
std::vector<std::string> get_input_names() const;
std::vector<std::string> get_output_names() const;
std::shared_ptr<SavedModelVariablesIndex> get_variables_index() const;
std::shared_ptr<VariablesIndex> get_variables_index() const;
std::shared_ptr<std::map<std::string, std::string>> get_saved_model_input_names() const;
std::shared_ptr<std::map<std::string, std::string>> get_saved_model_output_names() const;
private:
void load_places();
std::vector<std::shared_ptr<OpPlace>> topologically_sort_op_nodes() const;
std::vector<std::shared_ptr<OpPlace>> topologically_sort_op_nodes();
std::vector<std::shared_ptr<OpPlace>> m_op_places;
std::map<std::string, std::shared_ptr<OpPlace>> m_op_places_map;
@ -101,14 +102,18 @@ private:
const ov::frontend::InputModel& m_input_model;
std::vector<std::string> m_input_names;
std::unordered_set<std::string> m_found_inputs;
std::vector<std::string> m_output_names;
std::shared_ptr<TelemetryExtension> m_telemetry;
std::shared_ptr<SavedModelVariablesIndex> m_variables_index;
std::shared_ptr<VariablesIndex> m_variables_index;
std::shared_ptr<std::map<std::string, std::string>> m_saved_model_input_names;
std::shared_ptr<std::map<std::string, std::string>> m_saved_model_output_names;
bool m_native_format;
bool m_custom_inputs;
// shows if some nodes might be deleted from graph
bool m_graph_changed = false;
};
@ -118,6 +123,8 @@ void InputModel::InputModelTFImpl::load_places() {
std::set<std::string> op_names_with_consumers;
std::map<std::string, uint64_t> op_statistics;
m_custom_inputs = false;
m_inputs.clear();
for (; !m_graph_iterator->is_end(); m_graph_iterator->next()) {
auto node_decoder = m_graph_iterator->get_decoder();
@ -216,7 +223,7 @@ void InputModel::InputModelTFImpl::load_places() {
m_outputs.push_back(output_place);
}
}
std::shared_ptr<SavedModelVariablesIndex> InputModel::InputModelTFImpl::get_variables_index() const {
std::shared_ptr<VariablesIndex> InputModel::InputModelTFImpl::get_variables_index() const {
return m_variables_index;
}
@ -228,7 +235,7 @@ std::shared_ptr<std::map<std::string, std::string>> InputModel::InputModelTFImpl
return m_saved_model_output_names;
}
std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::get_op_places() const {
std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::get_op_places() {
return topologically_sort_op_nodes();
}
@ -240,7 +247,7 @@ std::vector<std::string> InputModel::InputModelTFImpl::get_output_names() const
return m_output_names;
}
std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologically_sort_op_nodes() const {
std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologically_sort_op_nodes() {
std::vector<std::shared_ptr<OpPlace>> topologically_sorted_ops;
std::stack<std::shared_ptr<OpPlace>> ops_to_do;
std::unordered_set<std::shared_ptr<OpPlace>> ops_done;
@ -309,6 +316,7 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
if (m_tensor_places.find(input_port_name) != m_tensor_places.end()) {
const auto& tensor_place = m_tensor_places[input_port_name];
is_input |= tensor_place->is_input();
m_found_inputs.insert(input_port_name);
}
// 2. check if the producer node is pruned by its output port
@ -336,6 +344,18 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
}
}
// Store information about found inputs.
// This covers graph "cutting": we need to return an updated list of inputs
if (current_operation_type == "Placeholder") {
for (auto& name : current_operation_place->get_names()) {
m_found_inputs.insert(name);
// Add unified name if needed
if (name.find(':') == std::string::npos) {
m_found_inputs.insert(name + ":0");
}
}
}
if (can_add) {
topologically_sorted_ops.push_back(current_operation_place);
ops_to_do.pop();
@ -352,7 +372,8 @@ std::vector<std::shared_ptr<OpPlace>> InputModel::InputModelTFImpl::topologicall
InputModel::InputModelTFImpl::InputModelTFImpl(const GraphIterator::Ptr& graph_iterator,
const ov::frontend::InputModel& input_model)
: m_graph_iterator(graph_iterator),
m_input_model(input_model) {
m_input_model(input_model),
m_native_format(false) {
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
load_places();
}
@ -370,15 +391,17 @@ InputModel::InputModelTFImpl::InputModelTFImpl(
const GraphIterator::Ptr& graph_iterator,
const ov::frontend::InputModel& input_model,
const std::shared_ptr<TelemetryExtension>& telemetry,
const std::shared_ptr<SavedModelVariablesIndex>& variables_index,
const std::shared_ptr<VariablesIndex>& variables_index,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_input_names,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names)
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names,
const bool native_format)
: m_graph_iterator(graph_iterator),
m_input_model(input_model),
m_telemetry(telemetry),
m_variables_index(variables_index),
m_saved_model_input_names(saved_model_input_names),
m_saved_model_output_names(saved_model_output_names) {
m_saved_model_output_names(saved_model_output_names),
m_native_format(native_format) {
FRONT_END_GENERAL_CHECK(m_graph_iterator, "Null pointer specified for GraphIterator");
m_input_names = graph_iterator->get_input_names();
m_output_names = graph_iterator->get_output_names();
@ -386,7 +409,37 @@ InputModel::InputModelTFImpl::InputModelTFImpl(
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::get_inputs() const {
return m_inputs;
if (m_native_format) {
std::vector<ov::frontend::Place::Ptr> found_inputs;
if (m_custom_inputs) {
// When the user overrides inputs/outputs, some inputs should be
// excluded, depending on the results of a call to topologically_sort_op_nodes.
// For example, a model has two inputs, but after cutting by an output, one input
// may no longer be on a path to the new output. In such a case we must not
// return it as an available input; otherwise it won't be connected with the graph.
for (auto& input : m_inputs) {
for (auto& name : input->get_names()) {
if (std::find(m_found_inputs.begin(), m_found_inputs.end(), name) != m_found_inputs.end()) {
found_inputs.push_back(input);
break;
}
}
}
} else {
// Do not return internally used inputs
for (auto& input : m_inputs) {
for (auto& name : input->get_names()) {
if (name == "saver_filename") {
continue;
}
found_inputs.push_back(input);
}
}
}
return found_inputs;
} else {
return m_inputs;
}
}
std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::get_outputs() const {
@ -394,20 +447,42 @@ std::vector<ov::frontend::Place::Ptr> InputModel::InputModelTFImpl::get_outputs(
}
ov::frontend::Place::Ptr InputModel::InputModelTFImpl::get_place_by_tensor_name(const std::string& tensorName) const {
if (m_tensor_places.find(tensorName) != m_tensor_places.end())
return m_tensor_places.at(tensorName);
std::string internalTensorName = tensorName;
if (m_saved_model_input_names.get()) {
for (const auto& alt_name : *m_saved_model_input_names) {
if (alt_name.second == tensorName) {
internalTensorName = alt_name.first;
break;
}
}
}
if (m_saved_model_output_names.get()) {
for (const auto& alt_name : *m_saved_model_output_names) {
if (alt_name.second == tensorName) {
internalTensorName = alt_name.first;
break;
}
}
}
if (m_tensor_places.find(internalTensorName) != m_tensor_places.end()) {
return m_tensor_places.at(internalTensorName);
}
// check that operation node exists for which this place is specified
std::string operation_name;
size_t port_idx;
std::string port_type;
tensorflow::extract_operation_name_and_port(tensorName, operation_name, port_idx, port_type);
tensorflow::extract_operation_name_and_port(internalTensorName, operation_name, port_idx, port_type);
if (m_op_places_map.find(operation_name) != m_op_places_map.end()) {
// new Tensor places must be constructed of dynamic rank and type
std::vector<std::string> names = {tensorName};
std::vector<std::string> names = {internalTensorName};
auto m_var_place =
std::make_shared<TensorPlace>(m_input_model, ov::PartialShape::dynamic(), ov::element::dynamic, names);
m_tensor_places[tensorName] = m_var_place;
m_tensor_places[internalTensorName] = m_var_place;
return m_var_place;
}
@ -431,6 +506,13 @@ void InputModel::InputModelTFImpl::override_all_inputs(const std::vector<ov::fro
for (const auto& input_place : inputs) {
m_inputs.push_back(castToTensorPlace(input_place));
}
if (m_native_format) {
// Need to read actual outputs
m_custom_inputs = true;
m_found_inputs.clear();
topologically_sort_op_nodes();
}
}
void InputModel::InputModelTFImpl::override_all_outputs(const std::vector<ov::frontend::Place::Ptr>& outputs) {
@ -439,6 +521,13 @@ void InputModel::InputModelTFImpl::override_all_outputs(const std::vector<ov::fr
for (const auto& output_place : outputs) {
m_outputs.push_back(castToTensorPlace(output_place));
}
if (m_native_format) {
// Need to read actual inputs
m_custom_inputs = true;
m_found_inputs.clear();
topologically_sort_op_nodes();
}
}
void InputModel::InputModelTFImpl::extract_subgraph(const std::vector<ov::frontend::Place::Ptr>& inputs,
@ -483,17 +572,19 @@ void InputModel::InputModelTFImpl::set_tensor_value(ov::frontend::Place::Ptr pla
InputModel::InputModel(const GraphIterator::Ptr& graph_iterator,
const std::shared_ptr<TelemetryExtension>& telemetry,
const std::shared_ptr<SavedModelVariablesIndex>& variables_index,
const std::shared_ptr<VariablesIndex>& variables_index,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_input_names,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names)
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names,
const bool native_format)
: _impl{std::make_shared<InputModelTFImpl>(graph_iterator,
*this,
telemetry,
variables_index,
saved_model_input_names,
saved_model_output_names)} {}
saved_model_output_names,
native_format)} {}
std::shared_ptr<SavedModelVariablesIndex> InputModel::get_variables_index() {
std::shared_ptr<VariablesIndex> InputModel::get_variables_index() {
return _impl->get_variables_index();
}
@ -573,7 +664,6 @@ ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& p
void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) {
_impl->set_tensor_value(place, value);
}
} // namespace tensorflow
} // namespace frontend
} // namespace ov
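
A minimal sketch of the alias resolution that get_place_by_tensor_name gains above: a user-facing signature name is translated back to the internal "name:port" tensor name before the lookup. The names are hypothetical and the helper below only mirrors the loop in the diff:

#include <iostream>
#include <map>
#include <string>

std::string resolve(const std::string& name, const std::map<std::string, std::string>& saved_model_names) {
    // saved_model_names: internal "name:port" -> user-facing signature name
    for (const auto& alt : saved_model_names)
        if (alt.second == name)
            return alt.first;
    return name;  // already an internal name
}

int main() {
    std::map<std::string, std::string> inputs{{"serving_default_input_mask:0", "input_mask"}};
    std::cout << resolve("input_mask", inputs) << "\n";  // prints serving_default_input_mask:0
    return 0;
}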

View File

@ -16,7 +16,7 @@ namespace tensorflow {
class OpPlace;
class TensorPlace;
class SavedModelVariablesIndex;
class VariablesIndex;
class InputModel : public ov::frontend::InputModel {
friend class TranslateSession;
@ -26,16 +26,16 @@ class InputModel : public ov::frontend::InputModel {
std::vector<std::string> get_input_names() const;
std::vector<std::string> get_output_names() const;
std::vector<std::shared_ptr<OpPlace>> get_op_places() const;
std::map<std::string, std::shared_ptr<TensorPlace>> get_tensor_places() const;
std::map<std::string, Output<Node>> get_tensor_values() const;
std::shared_ptr<InputModel> get_body_input_model(const std::string& body_input_model_name) const;
public:
explicit InputModel(const GraphIterator::Ptr& graph_iterator,
const std::shared_ptr<TelemetryExtension>& telemetry = {},
const std::shared_ptr<SavedModelVariablesIndex>& variables_index = {},
const std::shared_ptr<VariablesIndex>& variables_index = {},
const std::shared_ptr<std::map<std::string, std::string>> saved_model_input_names = nullptr,
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names = nullptr);
const std::shared_ptr<std::map<std::string, std::string>> saved_model_output_names = nullptr,
const bool native_format = false);
std::vector<ov::frontend::Place::Ptr> get_inputs() const override;
std::vector<ov::frontend::Place::Ptr> get_outputs() const override;
@ -49,9 +49,11 @@ public:
void set_element_type(const ov::frontend::Place::Ptr& place, const ov::element::Type&) override;
ov::element::Type get_element_type(const ov::frontend::Place::Ptr& place) const override;
void set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) override;
std::shared_ptr<SavedModelVariablesIndex> get_variables_index();
std::shared_ptr<VariablesIndex> get_variables_index();
std::shared_ptr<std::map<std::string, std::string>> get_saved_model_input_names() const;
std::shared_ptr<std::map<std::string, std::string>> get_saved_model_output_names() const;
std::map<std::string, std::shared_ptr<TensorPlace>> get_tensor_places() const;
};
} // namespace tensorflow

View File

@ -28,7 +28,13 @@ OutputVector translate_partitioned_call_op(const NodeContext& node) {
}
// try to retrieve ov::Model for body graph
auto body_model = translate_session->get_body_ov_model(operation_type);
// Here is a workaround, mostly for the SavedModel file format. Many tests use the internal name "Identity"
// to get an output. By default, names are cleared for inputs and outputs because they are available inside
// StatefulPartitionedCall and would otherwise cause duplicates. But we need to keep them
// for the "internal functions of a SavedModel", which are named "__inference_signature_wrapper" or
// "__inference_wrapped_model".
auto body_model =
translate_session->get_body_ov_model(operation_type, operation_type.find("wrappe") == std::string::npos);
FRONT_END_OP_CONVERSION_CHECK(
body_model,
"[TensorFlow Frontend] Internal error or incorrect input model: body graph is not found for " + operation_type +

View File

@ -0,0 +1,60 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "common_op_table.hpp"
#include "input_model.hpp"
#include "openvino/opsets/opset8.hpp"
using namespace std;
using namespace ov::opset8;
using namespace ov;
namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {
OutputVector translate_placeholder_linked_op(const NodeContext& node) {
auto dtype = node.get_attribute<ov::element::Type>("dtype");
auto shape = node.get_attribute<ov::PartialShape>("shape", ov::PartialShape::dynamic());
auto translate_session = node.get_translate_session();
TENSORFLOW_OP_VALIDATION(node,
translate_session,
"[TensorFlow Frontend] Internal error: Translate session is nullptr.");
auto model = reinterpret_cast<ov::frontend::tensorflow::InputModel*>(translate_session->get_input_model().get());
auto tensor_places = model->get_tensor_places();
auto saved_model_input_names = model->get_saved_model_input_names();
if (saved_model_input_names.get() && saved_model_input_names->size() > 0) {
auto input_name = saved_model_input_names->find(node.get_name());
if (input_name == saved_model_input_names->end()) {
input_name = saved_model_input_names->find(node.get_name() + ":0");
}
if (input_name != saved_model_input_names->end()) {
auto tensor_place = tensor_places.find(input_name->second);
if (tensor_place != tensor_places.end()) {
shape = tensor_place->second->get_partial_shape();
}
}
}
if (shape.rank().is_static() && shape.rank().get_length() == 0 && node.has_attribute("_output_shapes")) {
// we know some cases when the Placeholder operation has an empty scalar `shape` attribute value
// and a non-empty `_output_shapes` attribute value.
// the `_output_shapes` attribute value turns out to be correct in this case
auto output_shapes = node.get_attribute<std::vector<ov::PartialShape>>("_output_shapes");
if (output_shapes.size() == 1 && output_shapes[0].rank().is_static()) {
shape = output_shapes[0];
}
}
auto res = std::make_shared<Parameter>(dtype, shape);
set_node_name(node.get_name(), res);
return res->outputs();
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -21,7 +21,7 @@ namespace op {
// Reading variable from shard file
template <typename T>
static std::shared_ptr<ov::Node> read_variable(std::shared_ptr<SavedModelVariablesIndex> var_index,
static std::shared_ptr<ov::Node> read_variable(std::shared_ptr<VariablesIndex> var_index,
const ov::element::Type ov_type,
const ov::Shape shape,
const ::tensorflow::BundleEntryProto& entry,
@ -45,7 +45,7 @@ static std::shared_ptr<ov::Node> read_variable(std::shared_ptr<SavedModelVariabl
}
OutputVector translate_varhandle_op(const NodeContext& node) {
default_op_checks(node, 0, {"VarHandleOp"});
default_op_checks(node, 0, {"VarHandleOp", "VariableV2"});
auto translate_session = node.get_translate_session();
TENSORFLOW_OP_VALIDATION(node,
translate_session,
@ -56,6 +56,9 @@ OutputVector translate_varhandle_op(const NodeContext& node) {
std::shared_ptr<Node> const_node;
if (ov_type == element::undefined) {
const_node = std::make_shared<UnsupportedConstant>();
} else if (var_index.get() == nullptr) {
auto shape = node.get_attribute<::ov::PartialShape>("shape").get_shape();
const_node = std::make_shared<Parameter>(ov_type, shape);
} else {
// Getting variable description from variables index
const char* entry_data = nullptr;
@ -114,11 +117,18 @@ OutputVector translate_varisinitialized_op(const NodeContext& node) {
}
OutputVector translate_readvariable_op(const NodeContext& node) {
default_op_checks(node, 1, {"ReadVariableOp"});
default_op_checks(node, 1, {"ReadVariableOp", "Assign"});
// Documentation says it should return only one tensor with dtype, but
// _output_shapes in a vector of shapes and it means it could have multiple outputs
// _output_shapes is a vector of shapes and it means it may have multiple outputs
// https://www.tensorflow.org/api_docs/python/tf/raw_ops/ReadVariableOp
auto output_shapes = node.get_attribute<std::vector<::ov::PartialShape>>("_output_shapes");
auto tmp_output_shapes = node.get_attribute_as_any("_output_shapes");
if (tmp_output_shapes.empty() || !tmp_output_shapes.is<std::vector<::ov::PartialShape>>()) {
return {node.get_input(0).get_node_shared_ptr()};
}
auto output_shapes = tmp_output_shapes.as<std::vector<::ov::PartialShape>>();
OutputVector outs = {};

View File

@ -42,6 +42,7 @@ TF_OP_CONVERTER(translate_staticregexfullmatch_op);
TF_OP_CONVERTER(translate_stringjoin_op);
TF_OP_CONVERTER(translate_mergev2checkpoint_op);
TF_OP_CONVERTER(translate_while_op);
TF_OP_CONVERTER(translate_placeholder_linked_op);
const std::map<std::string, CreatorFunction> get_supported_ops() {
return {
@ -202,7 +203,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"DynamicStitch", CreatorFunction(translate_parallel_dynamic_stitch_op)},
{"ParallelDynamicStitch", CreatorFunction(translate_parallel_dynamic_stitch_op)},
{"PartitionedCall", CreatorFunction(translate_partitioned_call_op)},
{"Placeholder", CreatorFunction(translate_placeholder_op)},
{"Placeholder", CreatorFunction(translate_placeholder_linked_op)},
{"PlaceholderWithDefault", CreatorFunction(translate_placeholder_with_default_op)},
{"PreventGradient", CreatorFunction(translate_identity_op)},
{"Range", CreatorFunction(translate_range_op)},
@ -255,21 +256,26 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"TopK", CreatorFunction(translate_top_k_op)},
{"TopKV2", CreatorFunction(translate_top_k_v2_op)},
{"Transpose", CreatorFunction(translate_transpose_op)},
{"ReadVariableOp", CreatorFunction(translate_readvariable_op)},
{"AssignVariableOp", CreatorFunction(translate_assignvariable_op)},
{"VarIsInitializedOp", CreatorFunction(translate_varisinitialized_op)},
{"VarHandleOp", CreatorFunction(translate_varhandle_op)},
{"RestoreV2", CreatorFunction(translate_restorev2_op)},
{"StaticRegexFullMatch", CreatorFunction(translate_staticregexfullmatch_op)},
{"StringJoin", CreatorFunction(translate_stringjoin_op)},
{"ShardedFilename", CreatorFunction(translate_identity_op)},
{"MergeV2Checkpoints", CreatorFunction(translate_identity_op)},
{"Unpack", CreatorFunction(translate_unpack_op)},
{"While", CreatorFunction(translate_while_op)},
{"Where", CreatorFunction(translate_where_op)},
{"Xdivy", CreatorFunction(translate_x_div_y_op)},
{"ZerosLike", CreatorFunction(translate_zeros_like_op)},
// Translators for SavedModel and MetaGraph
{"Assign", CreatorFunction(translate_readvariable_op)},
{"AssignVariableOp", CreatorFunction(translate_assignvariable_op)},
{"IsVariableInitialized", CreatorFunction(translate_varisinitialized_op)},
{"MergeV2Checkpoints", CreatorFunction(translate_identity_op)},
{"ReadVariableOp", CreatorFunction(translate_readvariable_op)},
{"RestoreV2", CreatorFunction(translate_restorev2_op)},
{"ShardedFilename", CreatorFunction(translate_identity_op)},
{"StaticRegexFullMatch", CreatorFunction(translate_staticregexfullmatch_op)},
{"StringJoin", CreatorFunction(translate_stringjoin_op)},
{"VarIsInitializedOp", CreatorFunction(translate_varisinitialized_op)},
{"VarHandleOp", CreatorFunction(translate_varhandle_op)},
{"VariableV2", CreatorFunction(translate_varhandle_op)},
// Translators for internal operations
{"BlockLSTM", CreatorFunction(translate_block_lstm_op)},
{"GRUBlockCell", CreatorFunction(translate_gru_block_cell_op)},

View File

@ -45,13 +45,27 @@ std::vector<T> reorder_ops_by_names(const std::vector<std::string>& names, const
/// \returns True if node was updated, false otherwise
static bool apply_saved_model_names(std::shared_ptr<ov::Node> node,
const std::shared_ptr<std::map<std::string, std::string>>& saved_model_names) {
for (size_t i = 0; i < node->get_output_size(); ++i) {
const auto& node_names = node->get_output_tensor(i).get_names();
for (const auto& name : node_names) {
const auto& saved_model_name = saved_model_names->find(name);
if (saved_model_name != saved_model_names->end()) {
node->set_friendly_name(saved_model_name->second);
return true;
if (std::dynamic_pointer_cast<ov::opset8::Parameter>(node)) {
for (size_t i = 0; i < node->get_output_size(); ++i) {
const auto& node_names = node->get_output_tensor(i).get_names();
for (const auto& name : node_names) {
const auto& saved_model_name = saved_model_names->find(name);
if (saved_model_name != saved_model_names->end()) {
set_node_name(saved_model_name->second, node);
return true;
}
}
}
} else if (std::dynamic_pointer_cast<ov::opset10::Result>(node)) {
for (size_t i = 0; i < node->get_input_size(); ++i) {
const auto& node_names = node->get_input_tensor(i).get_names();
for (const auto& name : node_names) {
const auto& saved_model_name = saved_model_names->find(name);
if (saved_model_name != saved_model_names->end()) {
node->set_friendly_name(saved_model_name->second);
node->get_input_tensor(i).add_names({saved_model_name->second});
return true;
}
}
}
}
@ -179,11 +193,6 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
auto param = std::make_shared<ov::opset8::Parameter>(input_type, input_shape);
set_node_name(input_name, param);
if (saved_model_inputs.get() && saved_model_inputs->size() > 0) {
if (!apply_saved_model_names(param, saved_model_inputs)) {
param->get_output_tensor(0).add_names({"saved_model_unused"});
}
}
params.push_back(param);
ng_op_map[input_name] = {NamedOutput(param)};
}
@ -348,30 +357,10 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
if (port_type == "none") {
for (const auto& node_output : indexed_from_named(ng_op_map[operation_name])) {
auto result_node = std::make_shared<ov::opset8::Result>(node_output);
// Customize output name in case we have mapping from Saved Model format
if (saved_model_outputs.get() && saved_model_outputs->size() > 0) {
bool isUsed = true;
for (const auto& name : model_output_tensor_place->get_names()) {
auto saved_model_name = saved_model_outputs->find(name);
if (saved_model_name == saved_model_outputs->end()) {
saved_model_name = saved_model_outputs->find(name + ":0");
}
if (saved_model_name != saved_model_outputs->end()) {
result_node->set_friendly_name(saved_model_name->second);
results.push_back(result_node);
isUsed = false;
break;
}
if (!isUsed) {
result_node->get_input_tensor(0).add_names({"saved_model_unused"});
}
}
} else {
// to be aligned with Legacy Frontend we set a name along with output port index
// though, the Result name is not used in the OV API 2.0 but it is checked in MO args tests
result_node->set_friendly_name(model_output_name + ":0");
results.push_back(result_node);
}
// to be aligned with Legacy Frontend we set a name along with output port index
// though, the Result name is not used in the OV API 2.0 but it is checked in MO args tests
result_node->set_friendly_name(model_output_name + ":0");
results.push_back(result_node);
}
} else if (port_type == "out") {
const auto& node_outputs = indexed_from_named(ng_op_map[operation_name]);
@ -444,6 +433,26 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
}
}
if (saved_model_inputs.get() && saved_model_inputs->size() > 0) {
for (auto param : params) {
// If a parameter isn't found in the map of known inputs, mark it as unused
// and try to remove it in the normalize step later
if (!apply_saved_model_names(param, saved_model_inputs)) {
param->get_output_tensor(0).add_names({"saved_model_unused"});
}
}
}
if (saved_model_outputs.get() && saved_model_outputs->size() > 0) {
for (auto result : results) {
// If a result isn't found in the map of known outputs, mark it as unused
// and try to remove it in the normalize step later
if (!apply_saved_model_names(result, saved_model_outputs)) {
result->get_input_tensor(0).add_names({"saved_model_unused"});
}
}
}
// reorder Parameter and Result nodes according to the requested order
// of input and output names from the original model
// during translation and topologically sorting this order could be lost
@ -455,7 +464,7 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
ov_model = std::make_shared<ov::Model>(ordered_results, ordered_params, m_model_name);
}
std::shared_ptr<ov::Model> TranslateSession::get_body_ov_model(const std::string& body_graph_name) {
std::shared_ptr<ov::Model> TranslateSession::get_body_ov_model(const std::string& body_graph_name, bool clear_names) {
std::shared_ptr<ov::Model> body_model = nullptr;
auto input_model = std::dynamic_pointer_cast<InputModel>(m_input_model);
if (m_cached_body_models->count(body_graph_name)) {
@ -474,9 +483,11 @@ std::shared_ptr<ov::Model> TranslateSession::get_body_ov_model(const std::string
// before caching, erase tensor names from the body graph
// otherwise, it can lead tensor names conflicts
for (const auto& op : body_model->get_ordered_ops()) {
for (size_t ind = 0; ind < op->get_output_size(); ++ind) {
op->get_output_tensor(ind).set_names({});
if (clear_names) {
for (const auto& op : body_model->get_ordered_ops()) {
for (size_t ind = 0; ind < op->get_output_size(); ++ind) {
op->get_output_tensor(ind).set_names({});
}
}
}
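
A condensed sketch of the post-translation renaming pass added above: every Parameter (and, symmetrically, Result) that matches a signature name is renamed, and the rest are tagged "saved_model_unused" so the normalize step can drop them later. This mirrors, rather than reproduces, apply_saved_model_names and assumes an already built ov::Model:

#include <map>
#include <memory>
#include <string>
#include <openvino/openvino.hpp>

void rename_or_mark_unused(const std::shared_ptr<ov::Model>& model,
                           const std::map<std::string, std::string>& saved_model_inputs) {
    for (const auto& param : model->get_parameters()) {
        bool renamed = false;
        for (const auto& name : param->get_output_tensor(0).get_names()) {
            auto it = saved_model_inputs.find(name);
            if (it != saved_model_inputs.end()) {
                param->set_friendly_name(it->second);  // user-facing name from the signature
                renamed = true;
                break;
            }
        }
        if (!renamed)
            param->get_output_tensor(0).add_names({"saved_model_unused"});
    }
}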

View File

@ -29,7 +29,7 @@ public:
const ov::OutputVector& ov_inputs,
ov::OutputVector& ov_outputs);
std::shared_ptr<ov::Model> get_body_ov_model(const std::string& body_graph_name);
std::shared_ptr<ov::Model> get_body_ov_model(const std::string& body_graph_name, bool clear_names = true);
ov::frontend::InputModel::Ptr get_input_model(void) const {
return m_input_model;

View File

@ -44,6 +44,8 @@ static T smUnpack(char*& ptr, const char* ptr_end) {
return 0;
}
/// \brief Structure for storing information about a block in the Variables Index file.
/// It defines only the offset and block size, with no information about the exact content.
struct VIBlock {
uint64_t m_size;
uint64_t m_offset;
@ -54,6 +56,11 @@ struct VIBlock {
}
};
#define VARIABLES_INDEX_FOOTER_SIZE 48
/// \brief Structure for storing the Variables Index footer information.
/// It contains a description of two blocks and a magic number for file verification.
/// Currently, it is placed in the last VARIABLES_INDEX_FOOTER_SIZE bytes at the end of a file.
struct VIFooter {
VIBlock m_metaIndex;
VIBlock m_index;
@ -66,7 +73,10 @@ struct VIFooter {
void read(std::ifstream& fs) {
fs.seekg(0, std::ios::end);
size_t size = fs.tellg();
char footerData[48] = {}, *ptr = &footerData[0];
FRONT_END_GENERAL_CHECK(size >= VARIABLES_INDEX_FOOTER_SIZE,
"Wrong index file, file size is less than minimal expected");
char footerData[VARIABLES_INDEX_FOOTER_SIZE] = {}, *ptr = &footerData[0];
fs.seekg(size - sizeof(footerData));
fs.read(ptr, sizeof(footerData));
@ -84,14 +94,18 @@ struct VIFooter {
}
};
void SavedModelVariablesIndex::read_variables_index_block(std::ifstream& fs,
const VIBlock& index,
std::vector<char>& data,
uint32_t& offset,
uint32_t& offset_end) {
void VariablesIndex::read_variables_index_block(std::ifstream& fs,
const VIBlock& index,
std::vector<char>& data,
uint32_t& offset,
uint32_t& offset_end) {
size_t block_size = index.m_size;
data.clear();
data.resize(block_size + 5 /*kBlockTrailerSize*/);
FRONT_END_GENERAL_CHECK(index.m_offset <= m_variables_index_size,
"Block offset is bigger than variables index size");
FRONT_END_GENERAL_CHECK(index.m_offset + data.size() <= m_variables_index_size,
"Block size is bigger than variables index size");
fs.seekg(index.m_offset, std::ios::beg);
fs.read(data.data(), data.size());
#ifndef ENABLE_SNAPPY_COMPRESSION
@ -117,11 +131,11 @@ void SavedModelVariablesIndex::read_variables_index_block(std::ifstream& fs,
offset = smReadFixed<uint32_t>(data.data() + offset_end);
}
void SavedModelVariablesIndex::read_variables_index_pair(char*& ptr,
const char* ptr_end,
std::string& key,
char*& value,
uint32_t& val_length) {
void VariablesIndex::read_variables_index_pair(char*& ptr,
const char* ptr_end,
std::string& key,
char*& value,
uint32_t& val_length) {
uint32_t shared, nonShared;
shared = smUnpack<uint32_t>(ptr, ptr_end);
nonShared = smUnpack<uint32_t>(ptr, ptr_end);
@ -140,8 +154,10 @@ void SavedModelVariablesIndex::read_variables_index_pair(char*& ptr,
ptr = value + val_length;
}
void SavedModelVariablesIndex::read_variables_index(std::ifstream& fs,
std::map<std::string, std::vector<char>>& varIndex) {
void VariablesIndex::read_variables_index(std::ifstream& fs, std::map<std::string, std::vector<char>>& varIndex) {
fs.seekg(0, std::ios::end);
m_variables_index_size = fs.tellg();
VIFooter footer;
footer.read(fs);
@ -178,12 +194,12 @@ void SavedModelVariablesIndex::read_variables_index(std::ifstream& fs,
}
}
void SavedModelVariablesIndex::read_bundle_header() {
void VariablesIndex::read_bundle_header() {
auto item = m_variables_index.find("");
FRONT_END_GENERAL_CHECK(item != m_variables_index.end(), "Bundle Header isn't found in index");
::tensorflow::BundleHeaderProto bundleHeader;
FRONT_END_GENERAL_CHECK(bundleHeader.ParseFromString(item->second.data()),
FRONT_END_GENERAL_CHECK(bundleHeader.ParseFromArray(item->second.data(), static_cast<int>(item->second.size())),
"Bundle Header: Cannot parse Bundle Header");
FRONT_END_GENERAL_CHECK(bundleHeader.version().producer() == 1, "Bundle Header: Unsupported producer version");
FRONT_END_GENERAL_CHECK(bundleHeader.version().min_consumer() == 0, "Bundle Header: Unsupported consumer version");
@ -192,11 +208,14 @@ void SavedModelVariablesIndex::read_bundle_header() {
m_total_shards = bundleHeader.num_shards();
}
void SavedModelVariablesIndex::read_checkpointable_object_graph() {
void VariablesIndex::read_checkpointable_object_graph() {
m_variables_map.clear();
auto item = m_variables_index.find("_CHECKPOINTABLE_OBJECT_GRAPH");
FRONT_END_GENERAL_CHECK(item != m_variables_index.end(), "Checkpointable Object Graph isn't found in index");
if (item == m_variables_index.end()) {
// Might be missing for some models. In such a case all variables should be resolved through RestoreV2
return;
}
::tensorflow::BundleEntryProto entry;
FRONT_END_GENERAL_CHECK(entry.ParseFromArray(item->second.data(), static_cast<int>(item->second.size())),
@ -231,32 +250,7 @@ void SavedModelVariablesIndex::read_checkpointable_object_graph() {
}
}
bool GraphIteratorSavedModel::is_valid_signature(const ::tensorflow::SignatureDef& signature) const {
const std::map<::tensorflow::DataType, ov::element::Type> types{
{::tensorflow::DataType::DT_BOOL, ov::element::boolean},
{::tensorflow::DataType::DT_INT16, ov::element::i16},
{::tensorflow::DataType::DT_INT32, ov::element::i32},
{::tensorflow::DataType::DT_INT64, ov::element::i64},
{::tensorflow::DataType::DT_HALF, ov::element::f16},
{::tensorflow::DataType::DT_FLOAT, ov::element::f32},
{::tensorflow::DataType::DT_DOUBLE, ov::element::f64},
{::tensorflow::DataType::DT_UINT8, ov::element::u8},
{::tensorflow::DataType::DT_INT8, ov::element::i8},
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16},
{::tensorflow::DataType::DT_STRING, ov::element::undefined}};
for (const auto& it : signature.inputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
for (const auto& it : signature.outputs()) {
if (it.second.name().empty() || types.find(it.second.dtype()) == types.end())
return false;
}
return true;
}
bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const std::string& path) {
bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::string& path, const bool is_saved_model) {
m_variables_index.clear();
read_variables_index(vi_stream, m_variables_index);
read_bundle_header();
@ -264,10 +258,15 @@ bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const st
std::vector<char> suffix(20);
for (int32_t shard = 0; shard < m_total_shards; ++shard) {
std::snprintf(suffix.data(), suffix.size(), "data-%05d-of-%05d", shard, m_total_shards);
std::string fullPath = ov::util::path_join({path, "variables", std::string("variables.") + suffix.data()});
std::string fullPath;
if (is_saved_model) {
fullPath = ov::util::path_join({path, "variables", std::string("variables.") + suffix.data()});
} else {
fullPath = path + "." + suffix.data();
}
m_data_files[shard] =
std::shared_ptr<std::ifstream>(new std::ifstream(fullPath, std::ifstream::in | std::ifstream::binary));
FRONT_END_GENERAL_CHECK(m_data_files[shard]->is_open(), "Saved Model's variable index file does not exist");
FRONT_END_GENERAL_CHECK(m_data_files[shard]->is_open(), "Variable index data file does not exist");
}
read_checkpointable_object_graph();
@ -275,7 +274,7 @@ bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const st
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring& path) {
bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring& path, const bool is_saved_model) {
m_variables_index.clear();
read_variables_index(vi_stream, m_variables_index);
read_bundle_header();
@ -283,11 +282,15 @@ bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const st
std::vector<wchar_t> suffix(20);
for (int32_t shard = 0; shard < m_total_shards; ++shard) {
swprintf_s(suffix.data(), suffix.size(), L"data-%05d-of-%05d", shard, m_total_shards);
std::wstring fullPath =
ov::util::path_join_w({path, L"variables", std::wstring(L"variables.") + suffix.data()});
std::wstring fullPath;
if (is_saved_model) {
fullPath = ov::util::path_join_w({path, L"variables", std::wstring(L"variables.") + suffix.data()});
} else {
fullPath = path + L"." + suffix.data();
}
m_data_files[shard] =
std::shared_ptr<std::ifstream>(new std::ifstream(fullPath, std::ifstream::in | std::ifstream::binary));
FRONT_END_GENERAL_CHECK(m_data_files[shard]->is_open(), "Saved Model's variable index file does not exist");
FRONT_END_GENERAL_CHECK(m_data_files[shard]->is_open(), "Variable index data file does not exist");
}
read_checkpointable_object_graph();
@ -296,14 +299,20 @@ bool SavedModelVariablesIndex::read_variables(std::ifstream& vi_stream, const st
#endif
struct PtrNode {
using SharedPtrNode = std::shared_ptr<PtrNode>;
const ::tensorflow::NodeDef* node;
std::vector<PtrNode*> inputs;
std::vector<PtrNode*> outputs;
std::vector<SharedPtrNode> inputs;
std::vector<SharedPtrNode> outputs;
PtrNode() : node(nullptr), inputs(), outputs() {}
PtrNode(const ::tensorflow::NodeDef& src_node, const std::map<std::string, PtrNode*>& node_dictionary) {
PtrNode(const ::tensorflow::NodeDef& src_node) {
node = &src_node;
}
void associate_node(const SharedPtrNode shared_node, const std::map<std::string, SharedPtrNode>& node_dictionary) {
FRONT_END_GENERAL_CHECK(shared_node.get() == this, "Only the current object is expected for association");
std::vector<std::string> parsedName;
for (const auto& input_name : node->input()) {
parse_node_name(input_name, parsedName);
@ -313,17 +322,25 @@ struct PtrNode {
continue;
}
input_node->second->outputs.push_back(this);
input_node->second->outputs.push_back(shared_node);
inputs.push_back(input_node->second);
}
}
void find_parent_by_op(const std::string& op, std::vector<PtrNode*>& result) const {
void find_parent_by_op(const std::string& op,
std::vector<SharedPtrNode>& result,
std::shared_ptr<std::vector<const PtrNode*>> walked = nullptr) const {
if (walked.get() == nullptr) {
walked = std::make_shared<std::vector<const PtrNode*>>();
}
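// "walked" tracks already visited nodes so that cycles in the graph don't cause infinite recursion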
for (auto input : inputs) {
if (input->op() == op) {
result.push_back(input);
}
input->find_parent_by_op(op, result);
if (find(walked->begin(), walked->end(), input.get()) == walked->end()) {
walked->push_back(this);
input->find_parent_by_op(op, result, walked);
}
}
}
@ -350,7 +367,7 @@ struct PtrNode {
static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
const ::tensorflow::NodeDef& partCall,
std::map<std::string, PtrNode*>& node_dictionary) {
std::map<std::string, PtrNode::SharedPtrNode>& node_dictionary) {
FRONT_END_GENERAL_CHECK(partCall.op() == "StatefulPartitionedCall", "Passed node isn't StatefulPartitionedCall");
std::string func_name = partCall.attr().at("f").func().name();
@ -366,7 +383,7 @@ static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::G
FRONT_END_GENERAL_CHECK(func_def, "Function isn't found in the library");
FRONT_END_GENERAL_CHECK(graph_def->has_library(), "GraphDef contains functions, but doesn't have the library");
std::map<std::string, PtrNode*> nodes;
std::map<std::string, PtrNode::SharedPtrNode> nodes;
// Fill temporary input nodes for the given function
for (int i = 0; i < func_def->signature().input_arg_size(); ++i) {
@ -380,7 +397,9 @@ static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::G
// Parsing nodes and inline partitioned calls
for (const auto& node : func_def->node_def()) {
nodes[node.name()] = new PtrNode(node, nodes);
auto shared_node = std::make_shared<PtrNode>(node);
shared_node->associate_node(shared_node, nodes);
nodes[node.name()] = shared_node;
if (node.op() == "StatefulPartitionedCall") {
read_stateful_partitioned_call(graph_def, node, nodes);
@ -403,12 +422,14 @@ static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::G
}
}
void GraphIteratorSavedModel::map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map) const {
std::map<std::string, PtrNode*> nodes;
void VariablesIndex::map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map) {
std::map<std::string, PtrNode::SharedPtrNode> nodes;
for (const auto& node : graph_def->node()) {
nodes[node.name()] = new PtrNode(node, nodes);
auto shared_node = std::make_shared<PtrNode>(node);
shared_node->associate_node(shared_node, nodes);
nodes[node.name()] = shared_node;
if (node.op() == "StatefulPartitionedCall") {
read_stateful_partitioned_call(graph_def, node, nodes);
@ -416,67 +437,65 @@ void GraphIteratorSavedModel::map_assignvariable(const std::shared_ptr<::tensorf
}
for (const auto& node : nodes) {
if (node.second->op() != "AssignVariableOp") {
continue;
if (node.second->op() == "AssignVariableOp") {
// TODO: assets reading
std::vector<PtrNode::SharedPtrNode> restorev2_nodes;
std::vector<PtrNode::SharedPtrNode> varhandle_nodes;
node.second->find_parent_by_op("RestoreV2", restorev2_nodes);
node.second->find_parent_by_op("VarHandleOp", varhandle_nodes);
if (restorev2_nodes.size() == 1 && varhandle_nodes.size() == 1) {
std::vector<std::string> restore_output;
// Expected path is: RestoreV2 -(output_index)-(0)-> Identity -(0)-(1)-> AssignVariableOp
PtrNode::parse_node_name(node.second->inputs[1]->node->input(0), restore_output);
int output_index = std::atoi(restore_output[restore_output.size() - 1].c_str());
// Expected path is: Const(tensor_names) -(0)-(1)-> RestoreV2
const auto& variable_name =
restorev2_nodes[0]->inputs[1]->node->attr().at("value").tensor().string_val(output_index);
variables_map[varhandle_nodes[0]->node->name()] = variable_name;
}
} else if (node.second->op() == "Assign") {
std::vector<PtrNode::SharedPtrNode> restorev2_nodes;
std::vector<PtrNode::SharedPtrNode> variablev2_nodes;
node.second->find_parent_by_op("RestoreV2", restorev2_nodes);
node.second->find_parent_by_op("VariableV2", variablev2_nodes);
// Fall back to Variable nodes in case no associated VariableV2 nodes are found
if (variablev2_nodes.size() == 0) {
node.second->find_parent_by_op("Variable", variablev2_nodes);
}
if (restorev2_nodes.size() == 1 && variablev2_nodes.size() == 1) {
std::vector<std::string> restore_output;
// Expected path is: RestoreV2 -(output_index)-(0)-> Assign
PtrNode::parse_node_name(node.second->node->input(1), restore_output);
int output_index = std::atoi(restore_output[restore_output.size() - 1].c_str());
// Expected path is: Const(tensor_names) -(0)-(1)-> RestoreV2
const auto& variable_name =
restorev2_nodes[0]->inputs[1]->node->attr().at("value").tensor().string_val(output_index);
variables_map[variablev2_nodes[0]->node->name()] = variable_name;
}
}
}
// TODO: assets reading
std::vector<PtrNode*> restorev2_nodes;
std::vector<PtrNode*> varhandle_nodes;
node.second->find_parent_by_op("RestoreV2", restorev2_nodes);
node.second->find_parent_by_op("VarHandleOp", varhandle_nodes);
FRONT_END_GENERAL_CHECK(restorev2_nodes.size() == 1, "Found unexpected amount of RestoreV2 nodes");
FRONT_END_GENERAL_CHECK(varhandle_nodes.size() == 1, "Found unexpected amount of VarHandleOp nodes");
std::vector<std::string> restore_output;
// Expected path is: RestoreV2 -(output_index)-(0)-> Identity -(0)-(1)-> AssignVariableOp
PtrNode::parse_node_name(node.second->inputs[1]->node->input(0), restore_output);
int output_index = std::atoi(restore_output[restore_output.size() - 1].c_str());
// Expected path is: Const(tensor_names) -(0)-(1)-> RestoreV2
const auto& variable_name =
restorev2_nodes[0]->inputs[1]->node->attr().at("value").tensor().string_val(output_index);
variables_map[varhandle_nodes[0]->node->name()] = variable_name;
// Remove cross-links, otherwise the cyclic shared pointers would cause a memory leak
for (auto node : nodes) {
node.second->inputs.clear();
node.second->outputs.clear();
}
nodes.clear();
}
bool GraphIteratorSavedModel::is_supported(const std::string& path) {
return ov::util::directory_exists(path) && ov::util::file_exists(ov::util::path_join({path, "saved_model.pb"}));
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
bool GraphIteratorSavedModel::is_supported(const std::wstring& path) {
return ov::util::directory_exists(path) && ov::util::file_exists(ov::util::path_join_w({path, L"saved_model.pb"}));
}
#endif
template <>
std::basic_string<char> get_saved_model_name<char>() {
return "/saved_model.pb";
}
template <>
std::basic_string<char> get_variables_index_name<char>() {
return "/variables/variables.index";
}
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
template <>
std::basic_string<wchar_t> get_saved_model_name<wchar_t>() {
return L"/saved_model.pb";
}
template <>
std::basic_string<wchar_t> get_variables_index_name<wchar_t>() {
return L"/variables/variables.index";
}
#endif
} // namespace tensorflow
} // namespace frontend
} // namespace ov

View File

@ -0,0 +1,154 @@
// Copyright (C) 2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include "graph_iterator_proto.hpp"
#include "openvino/util/file_util.hpp"
#include "saved_model.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
struct VIBlock;
// Stores information about variables index
class VariablesIndex {
// Contains file size for internal checks
size_t m_variables_index_size;
// Contains the total number of shards, used for building the correct data file extension
int32_t m_total_shards;
// Contains the BundleEntryProto variables list read from the .index file
std::map<std::string, std::vector<char>> m_variables_index;
// List of opened data files for use with BundleEntryProto
std::map<int32_t, std::shared_ptr<std::ifstream>> m_data_files;
// List of mapped variables which could be read using TrackableObjectGraph
std::map<std::string, std::string> m_variables_map;
public:
/// \brief Reads variables from an opened variables index file. May trigger asserts in case of issues.
/// \param vi_stream Opened file stream; the initial position doesn't matter, the stream is rewound internally.
/// \param path A path to the file with variables data
/// \param is_saved_model Flag showing whether the variables index is a part of the Saved Model format
/// \returns True if everything loads successfully, false otherwise
bool read_variables(std::ifstream& vi_stream, const std::string& path, const bool is_saved_model = true);
#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
/// \brief Reads variables from an opened variables index file. May trigger asserts in case of issues.
/// \param vi_stream Opened file stream; the initial position doesn't matter, the stream is rewound internally.
/// \param path A path to the file with variables data
/// \param is_saved_model Flag showing whether the variables index is a part of the Saved Model format
/// \returns True if everything loads successfully, false otherwise
bool read_variables(std::ifstream& vi_stream, const std::wstring& path, const bool is_saved_model = true);
#endif
/// \brief Returns a pointer to the data and the size of a stored variable
/// \param name Name of the variable
/// \param data Pointer to a pointer which receives the data pointer
/// \param size Pointer to a variable which receives the data size
/// \returns True if the variable was found, false otherwise (data and size stay untouched)
bool get_variable(const std::string& name, const char** data, size_t* size) const {
auto varItem = m_variables_index.find(name);
if (varItem == m_variables_index.end()) {
return false;
}
if (data != nullptr) {
*data = varItem->second.data();
}
if (size != nullptr) {
*size = varItem->second.size();
}
return true;
}
/// \brief Returns a pointer to the data and the size of a variable mapped through the trackable object graph to the variables index
/// \param name Name of the mapped variable
/// \param data Pointer to a pointer which receives the data pointer
/// \param size Pointer to a variable which receives the data size
/// \returns True if the variable was found, false otherwise (data and size stay untouched)
bool get_mapped_variable(const std::string& name, const char** data, size_t* size) const {
auto mapItem = m_variables_map.find(name);
if (mapItem == m_variables_map.end()) {
return false;
}
return get_variable(mapItem->second, data, size);
}
/// \brief Checks whether a variable has a mapped pair
/// \param name Name of the variable to check for existence
/// \returns True if the variable has a mapped value, false otherwise
bool has_mapped_variable(const std::string& name) const {
auto mapItem = m_variables_map.find(name);
return mapItem != m_variables_map.end();
}
/// \brief Returns a shared pointer to the data file for a requested shard_id, or nullptr if the shard_id isn't found
/// \param shard_id Requested shard_id
/// \returns Valid shared_ptr to an ifstream, or nullptr if the shard isn't found
std::shared_ptr<std::ifstream> get_data_file(const int32_t shard_id) const {
auto result = m_data_files.find(shard_id);
return result != m_data_files.end() ? result->second : nullptr;
}
/// \brief Adds a variable mapping to the variables map
/// \param var_name Full variable name (from the .index file)
/// \param map_name Mapped name
/// \param rewrite Rewrite the mapped value in case it already exists
/// \returns True if the map was updated, false if nothing changed (the variable exists and rewrite is false)
bool map_variable(const std::string& var_name, const std::string& map_name, bool rewrite = false) {
if (m_variables_map.find(var_name) != m_variables_map.end() && rewrite == false) {
return false;
}
m_variables_map[var_name] = map_name;
return true;
}
/// \brief Reads the relationship between VarHandleOp - RestoreV2 - AssignVariableOp and
/// stores this information in a provided key=value map, where key is the name of a VarHandleOp and
/// value is the long variable name stored in RestoreV2.
/// This is needed to map a VarHandleOp to the right place in the .index file.
/// \param[in] graph_def GraphDef object for analysis
/// \param[out] variables_map Map of variables found in graph_def
static void map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def,
std::map<std::string, std::string>& variables_map);
private:
/// \brief Reads a block structure of the .index file
/// \param[in,out] fs File stream of the .index file; the position in the file will be updated
/// \param[in] index Variables index block which stores information about the block
/// \param[out] data Block data which will be read
/// \param[out] offset Offset of the block start
/// \param[out] offset_end Offset of the block end
void read_variables_index_block(std::ifstream& fs,
const VIBlock& index,
std::vector<char>& data,
uint32_t& offset,
uint32_t& offset_end);
/// \brief Reads a key=value pair from the provided pointer
/// \param[in,out] ptr Current pointer; will be moved past the pair just read (ready for the next read)
/// \param[in] ptr_end End of memory which shouldn't be passed in case of a broken structure
/// \param[out] key Key name
/// \param[out] value Stored value for the key (not a pure string, but a data block)
/// \param[out] val_length Length of the value read
void read_variables_index_pair(char*& ptr,
const char* ptr_end,
std::string& key,
char*& value,
uint32_t& val_length);
/// \brief Reads the .index file and stores a key=value map in the provided varIndex
/// \param[in,out] fs File stream to be parsed; the position in the file will be updated
/// \param[out] varIndex Variables index (key=value) read from the given file stream
void read_variables_index(std::ifstream& fs, std::map<std::string, std::vector<char>>& varIndex);
/// \brief Reads the bundle header if it is available. Checks the version and stores the number of shards
void read_bundle_header();
/// \brief Reads a key=value map from the stored _CHECKPOINTABLE_OBJECT_GRAPH variable
void read_checkpointable_object_graph();
};
} // namespace tensorflow
} // namespace frontend
} // namespace ov
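A minimal usage sketch of this class (the "graph" file prefix and the variable name are illustrative, not part of this change), assuming a checkpoint saved as graph.meta with graph.index and graph.data-00000-of-00001 next to it:
std::ifstream vi_stream("graph.index", std::ifstream::in | std::ifstream::binary);
ov::frontend::tensorflow::VariablesIndex var_index;
// is_saved_model=false resolves data files as "graph.data-NNNNN-of-NNNNN" next to the .index file
var_index.read_variables(vi_stream, "graph", false);
const char* entry = nullptr;
size_t entry_size = 0;
if (var_index.get_variable("my_variable", &entry, &entry_size)) {
    // "entry" points to a serialized BundleEntryProto locating the tensor inside the data files
}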

View File

@ -506,6 +506,50 @@ TEST_F(TransformationTestsF, RaggedTensorToSparse) {
}
}
TEST_F(TransformationTestsF, SavedModelProgramOnly) {
{
model = convert_model("saved_model_program-only");
model->validate_nodes_and_infer_types();
}
{
// create a reference graph
auto x = make_shared<Constant>(element::f32, Shape{2, 3}, vector<float>{1, 2, 3, 3, 2, 1});
auto y = make_shared<Parameter>(element::f32, Shape{1});
auto add = make_shared<Add>(x, y);
model_ref = make_shared<Model>(OutputVector{add}, ParameterVector{y});
}
}
TEST_F(TransformationTestsF, SavedModelVariables) {
{
model = convert_model("saved_model_variables");
model->validate_nodes_and_infer_types();
}
{
// create a reference graph
auto x = make_shared<Parameter>(element::f32, Shape{1});
auto y = make_shared<Constant>(element::f32, Shape{}, vector<float>{123});
auto multiply = make_shared<Multiply>(x, y);
model_ref = make_shared<Model>(OutputVector{multiply}, ParameterVector{x});
}
}
TEST_F(TransformationTestsF, MetaGraphVariables) {
{
model = convert_model("metagraph_variables/graph.meta");
model->validate_nodes_and_infer_types();
}
{
// create a reference graph
auto x = make_shared<Constant>(element::f32, Shape{2, 3}, vector<float>{1, 2, 3, 3, 2, 1});
auto y = make_shared<Parameter>(element::f32, Shape{1});
auto add = make_shared<Add>(x, y);
model_ref = make_shared<Model>(OutputVector{add}, ParameterVector{y});
}
}
TEST_F(TransformationTestsF, SplitInFunction) {
{
// create FAKE conversion extension for Split using named ports, this is not required for Split, but it tests

View File

@ -0,0 +1,19 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import tensorflow as tf
# Create the graph and model
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
x_value = [[1.,2.,3.],[3.,2.,1.]]
tf_x = tf.Variable(x_value)
tf_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1], name='y')
tf_z = tf.add(tf_x, tf_y, name="AddOperation")
sess.run(tf.compat.v1.global_variables_initializer())
saver = tf.compat.v1.train.Saver([tf_x])
os.makedirs(os.path.join(sys.argv[1], "metagraph_variables"))
saver.save(sess, os.path.join(sys.argv[1], "metagraph_variables", "graph"))
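# saver.save with this prefix writes graph.meta plus the checkpoint files
# (graph.index, graph.data-00000-of-00001 and a "checkpoint" file) which the MetaGraph frontend reads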

View File

@ -0,0 +1,16 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import tensorflow as tf
# Create the graph and model
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
x_value = [[1.,2.,3.],[3.,2.,1.]]
tf_x = tf.constant(x_value)
tf_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1], name='y')
tf_z = tf.add(tf_x, tf_y, name="AddOperation")
tf.compat.v1.saved_model.simple_save(sess, os.path.join(sys.argv[1], "saved_model_program-only"), inputs={'x':tf_x, 'y':tf_y}, outputs={'z':tf_z})
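# With only constants in the graph there are no variables to save, so the resulting
# SavedModel is "program-only", matching the SavedModelProgramOnly test above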

View File

@ -0,0 +1,19 @@
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import tensorflow as tf
# Create the graph and model
class AddVariable(tf.Module):
def __init__(self):
super(AddVariable, self).__init__()
self.var1 = tf.Variable(123.0)
@tf.function(input_signature=[tf.TensorSpec([1], tf.float32)])
def __call__(self, x):
return {'test_output_name': x * self.var1}
module = AddVariable()
tf.saved_model.save(module, os.path.join(sys.argv[1], "saved_model_variables"))
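# var1 is stored in the SavedModel variables index; on conversion the frontend
# folds it into a Constant(123.0), as checked by the SavedModelVariables test above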

View File

@ -7,31 +7,31 @@
#include <string>
#include <vector>
#include "internal_operation.hpp"
#include "unsupported_constant.hpp"
namespace ov {
namespace frontend {
namespace tensorflow {
/// Pseudo-entity for storing strings
class StringConstant : public InternalOperation {
class StringConstant : public UnsupportedConstant {
public:
OPENVINO_OP("StringConstant", "ov::frontend::tensorflow::util", InternalOperation);
OPENVINO_OP("StringConstant", "ov::frontend::tensorflow::util", UnsupportedConstant);
StringConstant(ov::Any data, const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, {}, 1),
: UnsupportedConstant(decoder),
m_data(data) {
validate_and_infer_types();
}
StringConstant(std::string& str, const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, {}, 1),
: UnsupportedConstant(decoder),
m_data({str}) {
validate_and_infer_types();
}
StringConstant(const std::shared_ptr<DecoderBase>& decoder = std::make_shared<DecoderFake>())
: InternalOperation(decoder, {}, 1) {
: UnsupportedConstant(decoder) {
validate_and_infer_types();
}

View File

@ -4,7 +4,6 @@
#include "helper_transforms/const_to_result_remover.hpp"
#include "helper_ops/string_constant.hpp"
#include "helper_ops/unsupported_constant.hpp"
#include "openvino/opsets/opset10.hpp"
@ -23,8 +22,7 @@ bool ConstToResultRemover::run_on_model(const std::shared_ptr<ov::Model>& m) {
for (const auto& result : m->get_results()) {
auto unsupported_const = as_type_ptr<UnsupportedConstant>(result->get_input_node_shared_ptr(0));
auto const_node = as_type_ptr<Constant>(result->get_input_node_shared_ptr(0));
auto string_const = as_type_ptr<StringConstant>(result->get_input_node_shared_ptr(0));
if (unsupported_const || const_node || string_const) {
if (unsupported_const || const_node) {
results_to_remove.push_back(result);
}
}

View File

@ -26,7 +26,7 @@ bool SavedModelUnusedRemover::run_on_model(const std::shared_ptr<ov::Model>& m)
bool isUsed = false;
for (size_t i = 0; i < result->get_input_size(); ++i) {
const auto& node_names = result->get_input_tensor(i).get_names();
isUsed = std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
isUsed |= std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
}
if (!isUsed) {
results_to_remove.push_back(result);
@ -35,9 +35,10 @@ bool SavedModelUnusedRemover::run_on_model(const std::shared_ptr<ov::Model>& m)
auto param = as_type_ptr<Parameter>(result->get_input_node_shared_ptr(0));
if (param) {
isUsed = false;
for (size_t i = 0; i < param->get_output_size(); ++i) {
const auto& node_names = param->get_output_tensor(i).get_names();
isUsed = std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
isUsed |= std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
}
if (!isUsed) {
results_to_remove.push_back(result);
@ -50,7 +51,7 @@ bool SavedModelUnusedRemover::run_on_model(const std::shared_ptr<ov::Model>& m)
bool isUsed = false;
for (size_t i = 0; i < param->get_output_size(); ++i) {
const auto& node_names = param->get_output_tensor(i).get_names();
isUsed = std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
isUsed |= std::find(node_names.begin(), node_names.end(), "saved_model_unused") == node_names.end();
}
if (!isUsed && std::find(params_to_remove.begin(), params_to_remove.end(), param) == params_to_remove.end()) {
params_to_remove.push_back(param);

View File

@ -258,10 +258,10 @@ NamedOutputVector translate_fused_batch_norm_op(const NodeContext& node) {
// set node names and tensor names
set_node_name(node.get_name(), fused_batch_norm.get_node_shared_ptr());
set_node_name(node.get_name() + ":1", batch_mean.get_node_shared_ptr());
set_node_name(node.get_name() + ":2", batch_variance.get_node_shared_ptr());
set_node_name(node.get_name() + ":3", zero_const.get_node_shared_ptr());
set_node_name(node.get_name() + ":4", zero_const2.get_node_shared_ptr());
set_out_name(node.get_name() + ":1", batch_mean.get_node_shared_ptr());
set_out_name(node.get_name() + ":2", batch_variance.get_node_shared_ptr());
set_out_name(node.get_name() + ":3", zero_const.get_node_shared_ptr());
set_out_name(node.get_name() + ":4", zero_const2.get_node_shared_ptr());
auto results = NamedOutputVector{{"y", fused_batch_norm},
{"batch_mean", batch_mean},
@ -270,7 +270,7 @@ NamedOutputVector translate_fused_batch_norm_op(const NodeContext& node) {
{"reserve_space_2", zero_const2}};
if (is_v3) {
auto zero_const3 = create_same_type_const_scalar<float>(scale, 0);
set_node_name(node.get_name() + ":5", zero_const3.get_node_shared_ptr());
set_out_name(node.get_name() + ":5", zero_const3.get_node_shared_ptr());
results.push_back({"reserve_space_3", zero_const3});
}

View File

@ -337,6 +337,10 @@ def convert_to_pb(argv: argparse.Namespace):
isinstance(argv.input_model, str):
return None
# Saved Model and MetaGraph formats are supported without freezing
if argv.saved_model_dir or argv.input_meta_graph:
return None
user_output_node_names_list = argv.output if argv.output else None
if user_output_node_names_list is not None and not isinstance(user_output_node_names_list, list):
user_output_node_names_list = user_output_node_names_list.split(',')

View File

@ -17,7 +17,7 @@ from openvino.runtime.utils.types import get_element_type, \
get_numpy_ctype # pylint: disable=no-name-in-module,import-error
from openvino.tools.mo.middle.passes.infer import validate_batch_in_shape
from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump
from openvino.tools.mo.moc_frontend.extractor import fe_user_data_repack, convert_params_lists_to_dicts
from openvino.tools.mo.moc_frontend.extractor import fe_user_data_repack, convert_params_lists_to_dicts, fe_output_user_data_repack
from openvino.tools.mo.moc_frontend.layout_utils import update_layout_to_dict, get_dimension_index_by_label
from openvino.tools.mo.utils.class_registration import get_enabled_and_disabled_transforms
from openvino.tools.mo.utils.error import Error
@ -34,7 +34,18 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
raise Exception("ONNX frontend does not support input model as BytesIO object. "
"Please use use_legacy_frontend=True to convert the model.")
else:
input_model = moc_front_end.load(argv.input_model)
if argv.input_model:
input_model = moc_front_end.load(argv.input_model)
elif argv.saved_model_dir:
input_model = moc_front_end.load(argv.saved_model_dir)
elif argv.input_meta_graph:
input_model = moc_front_end.load(argv.input_meta_graph)
if argv.output:
# Simulate the original behavior of freezing the model:
# freezing cuts the model by the requested outputs, so we
# apply the same cut to a natively supported model
outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name())
input_model.override_all_outputs([x['node'] for x in outputs])
argv.placeholder_shapes, argv.placeholder_data_types, argv.freeze_placeholder_with_value = convert_params_lists_to_dicts(
input_model, argv.placeholder_shapes, argv.placeholder_data_types,
@ -130,6 +141,9 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
add_names_to_tensors(input_model, user_shapes)
new_output_places = [x['node'] for x in outputs]
input_model.override_all_outputs(new_output_places)
# the operation above could have invalidated existing Place objects
if user_shapes:
model_inputs = input_model.get_inputs()
if user_shapes:
for user_shape in user_shapes:

View File

@ -53,8 +53,7 @@ class ConvertToPBTests(unittest.TestCase):
self.argv.input_meta_graph = os.path.join(tmp_dir, 'model.meta')
self.argv.output_dir = tmp_dir
path_to_pb = convert_to_pb(self.argv)
self.assertTrue(os.path.exists(path_to_pb), "The auxiliary .pb is not generated")
self.assertTrue(os.path.getsize(path_to_pb) != 0, "The auxiliary .pb is empty")
self.assertIsNone(path_to_pb, "Auxiliary .pb must not be generated for .meta")
def test_text_frozen_format(self):
try:
@ -155,12 +154,10 @@ class ConvertToPBTests(unittest.TestCase):
self.argv.input_meta_graph = os.path.join(tmp_dir, 'model1.meta')
self.argv.output_dir = tmp_dir
path_to_pb = convert_to_pb(self.argv)
self.assertTrue(os.path.exists(path_to_pb), "The auxiliary .pb is not generated")
self.assertTrue(os.path.getsize(path_to_pb) != 0, "The auxiliary .pb is empty")
self.assertIsNone(path_to_pb, "Auxiliary .pb must not be generated for .meta")
self.argv.input_meta_graph = os.path.join(tmp_dir, 'model2.meta')
self.argv.output_dir = tmp_dir
self.argv.input_model = None
path_to_pb = convert_to_pb(self.argv)
self.assertTrue(os.path.exists(path_to_pb), "The auxiliary .pb is not generated")
self.assertTrue(os.path.getsize(path_to_pb) != 0, "The auxiliary .pb is empty")
self.assertIsNone(path_to_pb, "Auxiliary .pb must not be generated for .meta")