[TF FE] Refactor constant reading to not use protobuf directly (#12518)

* Refactor constant reading

* Remove needless code

* Implement compressed value reading

* Remove needless protobuf headers

* Remove commented code

* Remove unnecessary comment

* Apply review feedback

* Fix linux build

* Fix win build

* Fix copyright
This commit is contained in:
Maxim Vafin
2022-08-19 13:29:35 +02:00
committed by GitHub
parent fe96bb2f7c
commit 3e9cc7d52d
13 changed files with 149 additions and 226 deletions

View File

@@ -19,13 +19,6 @@ public:
/// \return Shared pointer to appropriate value converted to openvino data type if it exists, 'nullptr' otherwise
virtual ov::Any get_attribute(const std::string& name) const = 0;
/// \brief Get attribute value by name
///
/// \param name Attribute name
/// \return Shared pointer to appropriate value in native tensorflow data type if it exists,
/// 'nullptr' otherwise
virtual ov::Any get_native_attribute(const std::string& name) const = 0;
/// \brief Get a number of inputs
virtual size_t get_input_size() const = 0;

View File

@@ -4,7 +4,10 @@
#include "decoder_proto.hpp"
#include "attr_value.pb.h"
#include "node_def.pb.h"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "types.pb.h"
namespace ov {
namespace frontend {
@@ -25,23 +28,61 @@ const std::map<::tensorflow::DataType, ov::element::Type>& TYPE_MAP() {
{::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}};
return type_map;
}
} // namespace
ov::Any DecoderProto::get_native_attribute(const std::string& name) const {
auto attrs = decode_attribute_helper(name);
if (attrs.empty()) {
return {};
}
template <typename T>
void extract_tensor_content(const std::string& tensor_content, ov::Tensor* values) {
const auto tensor_content_size = tensor_content.size();
FRONT_END_GENERAL_CHECK(tensor_content_size % sizeof(T) == 0,
"Size of tensor_content (",
tensor_content_size,
") is not a multiple of ",
sizeof(T));
switch (attrs[0].value_case()) {
case ::tensorflow::AttrValue::ValueCase::kTensor:
return attrs[0].tensor();
case ::tensorflow::AttrValue::ValueCase::kType:
return attrs[0].type();
default:
FRONT_END_GENERAL_CHECK(false, "DataType is not covered.");
const T* tensor_values = reinterpret_cast<const T*>(tensor_content.data());
FRONT_END_GENERAL_CHECK(values->get_size() == tensor_content_size / sizeof(T),
"Size of tensor is not equal to tensor_content size.");
std::copy(tensor_values, tensor_values + tensor_content_size / sizeof(T), values->data<T>());
}
// Fills `values` from the "compressed" representation of a TensorProto, i.e. the
// repeated bool_val/int_val/int64_val/float_val/double_val fields, which may hold
// fewer elements than the tensor shape implies:
//   - val_size == 0      -> the whole tensor is zero-filled;
//   - i <  val_size      -> element i is read from the proto field;
//   - i >= val_size      -> the last stored value is replicated to the end.
//
// \tparam T            C++ element type matching the tensor's element type.
// \param tensor_proto  Source proto holding the typed *_val fields.
// \param val_size      Number of elements actually stored in the relevant field.
// \param values        Destination tensor; its element type selects which field is read.
template <typename T>
void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_proto,
                                       int64_t val_size,
                                       ov::Tensor* values) {
    auto val_lastsaved = static_cast<T>(0);
    auto values_data = values->data<T>();
    // Hoist the element type out of the loop; it cannot change mid-iteration.
    const auto element_type = values->get_element_type();
    // size_t index matches ov::Tensor::get_size() and avoids a signed/unsigned
    // comparison warning; val_size is non-negative (it is a proto field size).
    for (size_t i = 0; i < values->get_size(); i++) {
        if (val_size == 0) {
            values_data[i] = static_cast<T>(0);
        } else if (i < static_cast<size_t>(val_size)) {
            auto val_i = static_cast<T>(0);
            switch (element_type) {
            // TODO: there are more element types to support here
            case ov::element::boolean:
                val_i = tensor_proto.bool_val()[i];
                break;
            case ov::element::i32:
                val_i = tensor_proto.int_val()[i];
                break;
            case ov::element::i64:
                val_i = tensor_proto.int64_val()[i];
                break;
            case ov::element::f32:
                val_i = tensor_proto.float_val()[i];
                break;
            case ov::element::f64:
                val_i = tensor_proto.double_val()[i];
                break;
            default:
                FRONT_END_THROW("Encountered unknown element type " + element_type.get_type_name());
            }
            values_data[i] = val_i;
            val_lastsaved = val_i;
        } else {
            values_data[i] = val_lastsaved;
        }
    }
}
} // namespace
ov::Any DecoderProto::get_attribute(const std::string& name) const {
auto attrs = decode_attribute_helper(name);
@@ -122,11 +163,82 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
"' attribute is not supported.");
}
case ::tensorflow::AttrValue::ValueCase::kTensor:
FRONT_END_GENERAL_CHECK(false,
"Conversion from Tensorflow to OpenVINO data type failed: Tensor type for '",
name,
"' attribute is not supported.");
case ::tensorflow::AttrValue::ValueCase::kTensor: {
const auto& tensor_proto = attrs[0].tensor();
const auto& tf_shape = tensor_proto.tensor_shape();
ov::PartialShape pshape;
for (int i = 0; i < tf_shape.dim_size(); i++) {
pshape.push_back(tf_shape.dim(i).size());
}
FRONT_END_GENERAL_CHECK(pshape.is_static(), "Dynamic shapes are not supported for Tensor attribute.");
const auto& tf_type = tensor_proto.dtype();
FRONT_END_GENERAL_CHECK(
TYPE_MAP().count(tf_type),
"Encountered unknown element type " + DataType_Name(tf_type) + " on an empty tensor_proto");
auto ov_type = TYPE_MAP().at(tf_type);
ov::Tensor res(ov_type, pshape.get_shape());
auto tensor_content = tensor_proto.tensor_content();
if (!tensor_content.empty() && tensor_proto.has_tensor_shape()) {
switch (ov_type) {
case ov::element::u8:
extract_tensor_content<uint8_t>(tensor_content, &res);
break;
case ov::element::i8:
extract_tensor_content<int8_t>(tensor_content, &res);
break;
case ov::element::i16:
extract_tensor_content<int16_t>(tensor_content, &res);
break;
case ov::element::i32:
extract_tensor_content<int32_t>(tensor_content, &res);
break;
case ov::element::i64:
extract_tensor_content<int64_t>(tensor_content, &res);
break;
case ov::element::f16:
extract_tensor_content<float16>(tensor_content, &res);
break;
case ov::element::f32:
extract_tensor_content<float>(tensor_content, &res);
break;
case ov::element::f64:
extract_tensor_content<double>(tensor_content, &res);
break;
case ov::element::bf16:
extract_tensor_content<bfloat16>(tensor_content, &res);
break;
default:
FRONT_END_THROW("Encountered unknown element type " + ov_type.get_type_name());
}
} else {
int64_t val_size = 0;
switch (ov_type) {
case ov::element::boolean:
val_size = tensor_proto.bool_val_size();
extract_compressed_tensor_content<bool>(tensor_proto, val_size, &res);
break;
case ov::element::i32:
val_size = tensor_proto.int_val_size();
extract_compressed_tensor_content<int32_t>(tensor_proto, val_size, &res);
break;
case ov::element::i64:
val_size = tensor_proto.int64_val_size();
extract_compressed_tensor_content<int64_t>(tensor_proto, val_size, &res);
break;
case ov::element::f32:
val_size = tensor_proto.float_val_size();
extract_compressed_tensor_content<float>(tensor_proto, val_size, &res);
break;
case ov::element::f64:
val_size = tensor_proto.double_val_size();
extract_compressed_tensor_content<double>(tensor_proto, val_size, &res);
break;
default:
FRONT_END_THROW("Encountered unknown element type " + ov_type.get_type_name());
}
}
return res;
}
case ::tensorflow::AttrValue::ValueCase::kPlaceholder:
FRONT_END_GENERAL_CHECK(false,
"Conversion from Tensorflow to OpenVINO data type failed: Placeholder type for '",

View File

@@ -7,10 +7,12 @@
#include <string>
#include <vector>
#include "attr_value.pb.h"
#include "node_def.pb.h"
#include "openvino/frontend/tensorflow/decoder.hpp"
#include "types.pb.h"
namespace tensorflow {
class NodeDef;
class AttrValue;
} // namespace tensorflow
namespace ov {
namespace frontend {
@@ -22,8 +24,6 @@ public:
ov::Any get_attribute(const std::string& name) const override;
ov::Any get_native_attribute(const std::string& name) const override;
size_t get_input_size() const override;
void get_input_node(size_t input_port_idx,

View File

@@ -4,12 +4,14 @@
#include "openvino/frontend/tensorflow/frontend.hpp"
#include "graph_iterator_proto.hpp"
#include "input_model.hpp"
#include "op_table.hpp"
#include "openvino/frontend/tensorflow/extension/conversion.hpp"
#include "openvino/frontend/tensorflow/graph_iterator.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/util/common_util.hpp"
#include "openvino/util/log.hpp"
#include "pass/transpose_sinking.hpp"
#include "so_extension.hpp"
#include "tf_framework_node.hpp"

View File

@@ -5,6 +5,7 @@
#include "input_model.hpp"
#include <fstream>
#include <iterator>
#include <queue>
#include "openvino/frontend/exception.hpp"
@@ -14,8 +15,6 @@
#include "place.hpp"
#include "utils.hpp"
using namespace google;
namespace ov {
namespace frontend {
namespace tensorflow {

View File

@@ -13,45 +13,10 @@ namespace frontend {
namespace tensorflow {
namespace op {
namespace {
using ConstMap = std::map<ov::element::Type,
std::pair<std::function<void(const NodeContext&, ov::element::Type, ov::Output<ov::Node>&)>,
const ov::element::Type>>;
const ConstMap& TF_OPENVINO_CONST_MAP() {
static const ConstMap the_map = {
{ov::element::f32, make_pair(make_const_op<float>, ov::element::f32)},
{ov::element::f64, make_pair(make_const_op<double>, ov::element::f64)},
{ov::element::i8, make_pair(make_const_op<int8_t>, ov::element::i8)},
{ov::element::i16, make_pair(make_const_op<int16_t>, ov::element::i16)},
#if 0
{DataType::DT_QINT8, make_pair(make_const_op<qint8>, ov::element::i8)},
{DataType::DT_QUINT8, make_pair(make_const_op<quint8>, ov::element::u8)},
{DataType::DT_QUINT16, make_pair(make_const_op<quint16>, ov::element::u16)},
#endif
{ov::element::i32, make_pair(make_const_op<int32_t>, ov::element::i32)},
{ov::element::i64, make_pair(make_const_op<int64_t>, ov::element::i64)},
{ov::element::u8, make_pair(make_const_op<uint8_t>, ov::element::u8)},
{ov::element::u16, make_pair(make_const_op<uint16_t>, ov::element::u16)},
{ov::element::boolean, make_pair(make_const_op<bool, char>, ov::element::boolean)}
};
return the_map;
}
} // namespace
OutputVector translate_const_op(const NodeContext& node) {
auto dt = node.get_attribute<ov::element::Type>("dtype");
Output<Node> res;
// TODO: fix DT_UINT32 and DT_UINT64 support
// no specialization of tensorflow::checkpoint::SavedTypeTraits...)
try {
const auto& func_param = TF_OPENVINO_CONST_MAP().at(dt);
func_param.first(node, func_param.second, res);
} catch (const std::out_of_range&) {
TENSORFLOW_OP_VALIDATION(node, false, "Failed to translate Constant with target OV type:" + dt.get_type_name());
}
set_node_name(node.get_name(), res.get_node_shared_ptr());
auto tensor = node.get_attribute<ov::Tensor>("value");
auto res = std::make_shared<ov::opset8::Constant>(tensor.get_element_type(), tensor.get_shape(), tensor.data());
set_node_name(node.get_name(), res);
return {res};
}
} // namespace op

View File

@@ -4,6 +4,7 @@
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/util/log.hpp"
using namespace std;
using namespace ov::opset8;

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <climits>
#include "op_table.hpp"
#include "openvino/opsets/opset8.hpp"

View File

@@ -6,16 +6,12 @@
#include <string>
#include "graph.pb.h"
#include "openvino/opsets/opset8.hpp"
#include "types.pb.h"
namespace ov {
namespace frontend {
namespace tensorflow {
using ::tensorflow::DataType;
std::shared_ptr<ov::opset8::Transpose> make_transpose(const ov::Output<ov::Node>& arg,
const ov::AxisVector& input_order);

View File

@@ -8,6 +8,7 @@
#include "openvino/opsets/opset8.hpp"
#include "openvino/pass/pattern/op/label.hpp"
#include "openvino/util/common_util.hpp"
#include "openvino/util/log.hpp"
#include "openvino_conversions.hpp"
#include "utils.hpp"

View File

@@ -4,11 +4,8 @@
#include "place.hpp"
#include "op_def.pb.h"
#include "openvino/frontend/exception.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "tensor.pb.h"
#include "types.pb.h"
namespace ov {
namespace frontend {

View File

@@ -5,18 +5,10 @@
#include "utils.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino_conversions.hpp"
using namespace ov::opset8;
// Converts a TensorFlow TensorShapeProto into an ov::PartialShape by copying each
// dimension size one-to-one into an ov::Dimension.
// NOTE(review): TF encodes an unknown dimension as size -1; presumably ov::Dimension
// constructed from -1 represents that correctly here — verify against callers.
void ov::frontend::tensorflow::tf_shape_to_ov_shape(const ::tensorflow::TensorShapeProto& tf_shape,
                                                    ov::PartialShape* ng_shape) {
    std::vector<ov::Dimension> dims;
    // Dimension count is known up front; reserve to avoid reallocation.
    dims.reserve(tf_shape.dim_size());
    for (int i = 0; i < tf_shape.dim_size(); i++) {
        dims.emplace_back(tf_shape.dim(i).size());
    }
    *ng_shape = ov::PartialShape(dims);
}
void ov::frontend::tensorflow::set_node_name(const std::string& node_name, const std::shared_ptr<Node>& node) {
const auto& outputs = node->outputs();
node->set_friendly_name(node_name);

View File

@@ -1,31 +1,12 @@
/* Copyright (C) 2018-2022 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* We modified "values_from_const_node" function from
* tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc file
* to integrate it with our infrastructure. The purpose and basic
* functionality remains the same.
==============================================================================*/
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "graph_iterator_proto.hpp"
#include "openvino/core/validation_util.hpp"
#include "openvino/frontend/tensorflow/node_context.hpp"
#include "openvino/opsets/opset8.hpp"
#include "openvino/util/log.hpp"
#include "openvino_conversions.hpp"
namespace ov {
namespace frontend {
@@ -71,8 +52,6 @@ void make_padding(const std::string& tf_padding_type,
}
}
void tf_shape_to_ov_shape(const ::tensorflow::TensorShapeProto& tf_shape, ov::PartialShape* ng_shape);
template <typename T>
void get_const_input(const NodeContext& node, int64_t input_index, std::vector<T>* vector) {
auto ng_input = node.get_input(input_index);
@@ -83,122 +62,6 @@ void get_const_input(const NodeContext& node, int64_t input_index, std::vector<T
FRONT_END_THROW("Node must be converted to Constant.");
}
// Taken from: tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
// Extract values from a Const op to `values`. Returns true if succeeds.
//
// Modified with an extra `VecT` parameter to handle the case where the type
// in the std::vector does not match TensorFlow's notion of what the C++ type
// should be (e.g. when T is `bool`, we actually need a std::vector of `char` for
// compatibility with OpenVINO).
// Taken from: tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc
// Extract values from a Const op to `values`. Returns true if succeeds.
//
// Modified with an extra `VecT` parameter to handle the case where the type
// in the std::vector does not match TensorFlow's notion of what the C++ type
// should be (e.g. when T is `bool`, we actually need a std::vector of `char` for
// compatibility with OpenVINO).
//
// \param node                The Const node to read; validated to have op type "Const".
// \param const_tensor_shape  Out: the static shape of the constant tensor.
// \param values              Out: the extracted element values.
template <typename T, typename VecT = T>
void values_from_const_node(const NodeContext& node, ov::Shape* const_tensor_shape, std::vector<VecT>* values) {
    TENSORFLOW_OP_VALIDATION(node, node.get_op_type() == "Const", "Node is expected to be Constant.");
    const auto* decoder = node.get_decoder();
    auto dt = decoder->get_native_attribute("dtype").as<::tensorflow::DataType>();
    // TODO: investigate why as<>() && method using std::move leads to the issue (75371) in OVTF integration with
    // tensorflow frontend. The current fix: replace it with as<>() & method. But in fact, both
    // approaches should work the same way.
    auto value = decoder->get_native_attribute("value");
    auto tensor_proto = value.as<::tensorflow::TensorProto>();

    const ::tensorflow::TensorShapeProto& shape = tensor_proto.tensor_shape();
    ov::PartialShape pshape;
    tf_shape_to_ov_shape(shape, &pshape);
    *const_tensor_shape = pshape.get_shape();
    TENSORFLOW_OP_VALIDATION(node, pshape.is_static(), "Dynamic shapes are not supported in Constant conversion.");

    // Fast path: the raw little-endian byte blob in tensor_content holds one value
    // per element; reinterpret and bulk-copy.
    auto tensor_content = tensor_proto.tensor_content();
    std::vector<char> tensor_values_plain(tensor_content.begin(), tensor_content.end());
    const T* tensor_values = reinterpret_cast<const T*>(tensor_values_plain.data());

    if (!tensor_values_plain.empty() && tensor_proto.has_tensor_shape()) {
        values->insert(values->end(), tensor_values, tensor_values + tensor_values_plain.size() / sizeof(T));
        return;
    }

    const auto tensor_content_size = tensor_proto.tensor_content().size();
    if (tensor_content_size % sizeof(VecT)) {
        std::cerr << "[ ERROR ] tensor_content_size (" << tensor_content_size << ") is not a multiple of "
                  << sizeof(VecT);
    }

    // If tensor_content_size is zero, we'll have to take the values from
    // int_val, float_val, etc. These repeated fields may hold fewer elements than
    // the shape implies: the last stored value is replicated to fill the rest.
    if (tensor_content_size == 0) {
        int64_t n_elements = 1;
        for (auto i = 0; i < shape.dim_size(); i++) {
            TENSORFLOW_OP_VALIDATION(node,
                                     shape.dim(i).size() >= 0,
                                     "Const node has empty tensor and an unknown dimension size");
            n_elements *= shape.dim(i).size();
        }
        values->resize(n_elements);

        auto val_lastsaved = static_cast<T>(0);
        for (int64_t i = 0; i < n_elements; i++) {
            int64_t val_size = 0;
            auto val_i = static_cast<T>(0);
            switch (dt) {
            // TODO: there are more element types to support
            // here
            case ::tensorflow::DT_INT32:
                val_size = tensor_proto.int_val_size();
                // Guard with i < val_size (not val_size > 0): reading the repeated
                // field at an index >= its size is out of bounds.
                if (i < val_size)
                    val_i = tensor_proto.int_val()[i];
                break;
            case ::tensorflow::DT_INT64:
                val_size = tensor_proto.int64_val_size();
                if (i < val_size)
                    val_i = tensor_proto.int64_val()[i];
                break;
            case ::tensorflow::DT_FLOAT:
                val_size = tensor_proto.float_val_size();
                if (i < val_size)
                    val_i = tensor_proto.float_val()[i];
                break;
            case ::tensorflow::DT_BOOL:
                val_size = tensor_proto.bool_val_size();
                if (i < val_size)
                    val_i = tensor_proto.bool_val()[i];
                break;
            case ::tensorflow::DT_DOUBLE:
                val_size = tensor_proto.double_val_size();
                if (i < val_size)
                    val_i = tensor_proto.double_val()[i];
                break;
            default:
                OPENVINO_DEBUG << "Const node has empty tensor_proto and we don't know how to "
                                  "handle this element type";
                FRONT_END_THROW("Encountered unknown element type " + DataType_Name(dt) + " on an empty tensor_proto");
            }
            if (val_size == 0) {
                (*values)[i] = static_cast<T>(0);
            } else if (i < val_size) {
                (*values)[i] = val_i;
                val_lastsaved = val_i;
            } else {
                (*values)[i] = val_lastsaved;
            }
        }
    } else {
        return;
    }
}
// Builds an ov::opset8::Constant of element type `et` from a TF Const node and
// stores it in `ng_node`.
//
// \tparam T     Element type used to interpret the TF tensor data.
// \tparam VecT  C++ storage type for the value vector (e.g. T=bool is stored as
//               char for compatibility with OpenVINO).
template <typename T, typename VecT = T>
void make_const_op(const NodeContext& node, element::Type et, ov::Output<ov::Node>& ng_node) {
    std::vector<VecT> const_values;
    ov::Shape ng_shape;
    values_from_const_node<T, VecT>(node, &ng_shape, &const_values);
    ng_node = std::make_shared<ov::opset8::Constant>(et, ng_shape, const_values);
}  // dropped the stray ';' that followed the function body (-Wextra-semi)
ov::op::PadType convert_tf_padding(const NodeContext& node, const std::string& tf_padding);
ov::OutputVector translate_convolution_op(const NodeContext& node, size_t spatial_dims_num);