[Core] Support String Tensors (#21244)

* [Core] Support String Tensors

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add String Constant implementation

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix build issue in tests

* Add cast_vector for Constant of ov::string type

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix build issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix build issue: ambiguous type in GNA

* Fix ambiguous build issue in GNA tests

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code-style

* Fix code-style

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix ambiguous build issue in GNA tests

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix ambiguous build issue in TF FE tests

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update openvino.style for naming convention check

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix compilation error in core unit tests - need typename

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add test for new element_type

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code-style

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update src/inference/src/dev/make_tensor.cpp

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>

* Add support of string Tensors for Constant

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix copying string tensor value for Constant

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Complete template methods for Constant

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Improve performance for initialization and destruction of string Tensor for set_shape

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add check for string value in test

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Remove unused variable

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update src/inference/src/dev/make_tensor.cpp

* Fix copy_to for ITensor of string type and add tests

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add tests for Constant of string type and serialization

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Use memset_allocation to switch initialization

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Add additional documentation for host_ptr

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update src/core/src/op/constant.cpp

* Use OPENVINO_THROW

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update src/core/include/openvino/op/constant.hpp

* Update src/core/include/openvino/op/constant.hpp

Co-authored-by: Pawel Raasz <pawel.raasz@intel.com>

* Apply code-review feedback: use string_size

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Apply code-review feedback

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Recover evaluate impl for non-string type

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix code for creating of string constant for legacy non HostTensor tensor

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix build issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Apply code-review feedback: simplify copy_to method

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Fix build issue

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Use StringAlignedBuffer to store string Constant values

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Remove not needed methods in StringAlignedBuffer

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Refactor set_shape method

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

---------

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Co-authored-by: Pawel Raasz <pawel.raasz@intel.com>
This commit is contained in:
Roman Kazantsev 2023-12-01 11:17:53 +04:00 committed by GitHub
parent 7bb542fa70
commit abfbdd1b96
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 966 additions and 115 deletions

View File

@ -18,7 +18,7 @@ VariableReference: '^\w+$'
EnumName: '^[A-Z][\w]+$'
# excepts element_type
EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$'
EnumConstantName: '^([A-Z\d_]+|undefined|dynamic|boolean|bf16|f16|f32|f64|i4|i8|i16|i32|i64|u1|u4|u8|u16|u32|u64|nf4|string|asymmetric|align_corners|round_prefer_floor|round_prefer_ceil|floor|ceil|simple|nearest|linear|linear_onnx|cubic|area|scales|sizes|half_pixel|tf_half_pixel_for_nn|pytorch_half_pixel|asymetric)$'
# TODO: align
UsingDeclaration: '^.*$'
TypedefName: '^.*$'

View File

@ -172,8 +172,8 @@ ov::pass::GRUCellFusion::GRUCellFusion() {
auto squeeze_B = rg.make<ov::op::v0::Squeeze>(Bzrh, axis_0);
string act_name_1 = pattern_map.at(activation_1)->get_type_name();
string act_name_2 = pattern_map.at(activation_2)->get_type_name();
std::string act_name_1 = pattern_map.at(activation_1)->get_type_name();
std::string act_name_2 = pattern_map.at(activation_2)->get_type_name();
auto to_lower = [](unsigned char c) {
return std::tolower(c);
};
@ -186,7 +186,7 @@ ov::pass::GRUCellFusion::GRUCellFusion() {
Rzrh,
squeeze_B,
hidden_size,
vector<string>{act_name_1, act_name_2});
vector<std::string>{act_name_1, act_name_2});
cell->set_friendly_name(m.get_match_root()->get_friendly_name());
copy_runtime_info(m.get_matched_nodes(), rg.get());

View File

@ -21,7 +21,7 @@ namespace {
enum class WeightsFormat { zr, rz };
Output<Node> create_activation_by_name(const string& activation_name, const Output<Node>& input) {
Output<Node> create_activation_by_name(const std::string& activation_name, const Output<Node>& input) {
if (activation_name == "sigmoid") {
return make_shared<Sigmoid>(input);
} else if (activation_name == "tanh") {
@ -33,8 +33,8 @@ Output<Node> create_activation_by_name(const string& activation_name, const Outp
}
shared_ptr<Model> gen_model(WeightsFormat format,
const string& activation_1,
const string& activation_2,
const std::string& activation_1,
const std::string& activation_2,
size_t batch,
size_t hidden_size,
size_t input_size,
@ -83,8 +83,8 @@ shared_ptr<Model> gen_model(WeightsFormat format,
}
shared_ptr<Model> gen_reference(WeightsFormat format,
const string& activation_1,
const string& activation_2,
const std::string& activation_1,
const std::string& activation_2,
size_t batch,
size_t hidden_size,
size_t input_size,
@ -132,15 +132,15 @@ shared_ptr<Model> gen_reference(WeightsFormat format,
auto squeeze_B = make_shared<Squeeze>(Bzrh, axis_0);
auto cell =
make_shared<GRUCell>(X, H, Wzrh, Rzrh, squeeze_B, hidden_size, vector<string>{activation_1, activation_2});
make_shared<GRUCell>(X, H, Wzrh, Rzrh, squeeze_B, hidden_size, vector<std::string>{activation_1, activation_2});
return make_shared<Model>(OutputVector{cell}, params);
}
} // namespace
struct GRUFusionParams {
WeightsFormat format;
string activation_1;
string activation_2;
std::string activation_1;
std::string activation_2;
size_t batch;
size_t hidden_size;
size_t input_size;

View File

@ -102,6 +102,9 @@ std::shared_ptr<Node> make_constant(const element::Type& type, const Shape& shap
case element::Type_t::nf4:
unsupported_data_type = "nf4";
break;
case element::Type_t::string:
unsupported_data_type = "string";
break;
case element::Type_t::undefined:
unsupported_data_type = "undefined";
break;

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/runtime/aligned_buffer.hpp"
namespace ov {
/// \brief Aligned buffer specialization that owns an array of std::string objects.
/// Holds m_num_elements std::string elements constructed inside the aligned
/// storage; the destructor runs each element's destructor before the base
/// class releases the raw memory.
class StringAlignedBuffer : public ov::AlignedBuffer {
public:
// NOTE(review): the default constructor leaves m_num_elements uninitialized —
// confirm it is only used where the member is assigned before first read
StringAlignedBuffer() = default;
// \param num_elements number of std::string elements the buffer will hold
// \param byte_size    size of the allocation in bytes
// \param alignment    alignment of the allocation
// \param initialize   true  -> default-construct every element up front;
//                     false -> caller placement-constructs elements later
StringAlignedBuffer(size_t num_elements, size_t byte_size, size_t alignment, bool initialize);
virtual ~StringAlignedBuffer();
private:
// non-copyable: elements are owned in place and must be destroyed exactly once
StringAlignedBuffer(const StringAlignedBuffer&) = delete;
StringAlignedBuffer& operator=(const StringAlignedBuffer&) = delete;
protected:
size_t m_num_elements;
};
} // namespace ov

View File

@ -41,6 +41,7 @@ using ov::element::i4;
using ov::element::i64;
using ov::element::i8;
using ov::element::nf4;
using ov::element::string;
using ov::element::u1;
using ov::element::u16;
using ov::element::u32;

View File

@ -51,7 +51,8 @@ enum class Type_t {
u16, //!< u16 element type
u32, //!< u32 element type
u64, //!< u64 element type
nf4 //!< nf4 element type
nf4, //!< nf4 element type
string //!< string element type
};
/// \brief Base class to define element type
@ -181,6 +182,9 @@ constexpr Type u64(Type_t::u64);
/// \brief nf4 element type
/// \ingroup ov_element_cpp_api
constexpr Type nf4(Type_t::nf4);
/// \brief string element type
/// \ingroup ov_element_cpp_api
constexpr Type string(Type_t::string);
template <typename T>
Type from() {
@ -214,6 +218,8 @@ template <>
OPENVINO_API Type from<ov::bfloat16>();
template <>
OPENVINO_API Type from<ov::float16>();
template <>
OPENVINO_API Type from<std::string>();
OPENVINO_API Type fundamental_type_for(const Type& type);

View File

@ -97,4 +97,9 @@ template <>
struct element_type_traits<element::Type_t::nf4> {
using value_type = int8_t;
};
template <>
struct element_type_traits<element::Type_t::string> {
using value_type = std::string;
};
} // namespace ov

View File

@ -161,6 +161,9 @@ public:
case Type_t::nf4:
fill_data<Type_t::nf4>(value);
break;
case Type_t::string:
fill_data<Type_t::string>(value);
break;
case Type_t::undefined:
case Type_t::dynamic:
OPENVINO_THROW("unsupported type");
@ -364,6 +367,9 @@ public:
case Type_t::u64:
cast_vector<Type_t::u64>(rc, num_elements_to_cast);
break;
case Type_t::string:
cast_vector<Type_t::string>(rc, num_elements_to_cast);
break;
default:
OPENVINO_THROW("unsupported type");
}
@ -454,7 +460,7 @@ private:
template <element::Type_t Type,
typename OUT_T,
typename std::enable_if<Type != element::Type_t::u1 && Type != element::Type_t::u4 &&
Type != element::Type_t::i4,
Type != element::Type_t::i4 && Type != element::Type_t::string,
bool>::type = true>
void cast_vector(std::vector<OUT_T>& output_vector, size_t num_elements) const {
// this function is a workaround for a warning emitted during Windows builds
@ -511,6 +517,29 @@ private:
});
}
/// \brief Copies the stored std::string elements into \p output_vector.
/// At most min(num_elements, element count of the constant) values are copied.
template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void cast_vector(std::vector<std::string>& output_vector, size_t num_elements) const {
    const auto copy_count = std::min(num_elements, shape_size(m_shape));
    output_vector.reserve(copy_count);
    const auto* first = get_data_ptr<Type>();
    std::copy(first, first + copy_count, std::back_inserter(output_vector));
}
/// \brief Fallback overload: casting a non-string constant to std::vector<std::string> is unsupported.
/// \throws ov::Exception always, naming the actual element type.
template <element::Type_t Type, typename std::enable_if<Type != element::Type_t::string, bool>::type = true>
void cast_vector(std::vector<std::string>& output_vector, size_t num_elements) const {
    // Bug fix: a space is required before "to" so the message reads
    // "... of type i32 to std::vector ..." instead of "... of type i32to ...".
    OPENVINO_THROW("cast_vector does not support casting ov::Tensor of type " +
                   ov::element::Type(Type).to_string() + " to std::vector of std::string elements");
}
template <element::Type_t Type,
typename OUT_T,
typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void cast_vector(std::vector<OUT_T>& output_vector, size_t num_elements) const {
auto output_type = std::string(typeid(OUT_T{}).name());
OPENVINO_THROW("cast_vector does not support casting string ov::Tensor to std::vector with elements of type " +
output_type);
}
template <element::Type_t Type,
typename OUT_T,
typename std::enable_if<Type == element::Type_t::u1, bool>::type = true>
@ -569,11 +598,19 @@ private:
output.resize(element_number);
}
// Fallback overload: fill_data(std::string) is only valid for string constants.
// Selected for every non-string element type; always throws.
template <element::Type_t Type,
typename StorageDataType = fundamental_type_for<Type>,
typename std::enable_if<Type != element::Type_t::string, bool>::type = true>
void fill_data(const std::string& value) {
OPENVINO_THROW("Called fill_data(std::string) with non-string element_type");
}
template <element::Type_t Type,
typename T,
typename StorageDataType = fundamental_type_for<Type>,
typename std::enable_if<Type != element::Type_t::u1 && Type != element::Type_t::u4 &&
Type != element::Type_t::i4 && Type != element::Type_t::nf4,
Type != element::Type_t::i4 && Type != element::Type_t::nf4 &&
Type != element::Type_t::string,
bool>::type = true>
void fill_data(const T& value) {
#ifdef __clang__
@ -614,6 +651,21 @@ private:
std::fill_n(get_data_ptr_nc<Type>(), size, v);
}
/// \brief Fills a string constant with copies of \p value.
/// The storage is raw at this point, so each element is constructed in place.
template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void fill_data(const std::string& value) {
    // uninitialized_fill_n placement-constructs every std::string element
    std::uninitialized_fill_n(get_data_ptr_nc<Type>(), shape_size(m_shape), value);
}
/// \brief Fallback overload: filling a string constant with a non-string value is unsupported.
/// \throws ov::Exception always, naming the offending value type.
template <element::Type_t Type,
          typename T,
          typename StorageDataType = fundamental_type_for<Type>,
          typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void fill_data(const T& value) {
    OPENVINO_THROW("fill_data does not support to fill ov::Tensor of string type with value of " +
                   std::string(typeid(value).name()));
}
template <element::Type_t Type,
typename T,
typename StorageDataType = fundamental_type_for<Type>,
@ -658,7 +710,8 @@ private:
typename T,
typename StorageDataType = fundamental_type_for<Type>,
typename std::enable_if<Type != element::Type_t::nf4 && Type != element::Type_t::u1 &&
Type != element::Type_t::u4 && Type != element::Type_t::i4,
Type != element::Type_t::u4 && Type != element::Type_t::i4 &&
Type != element::Type_t::string,
bool>::type = true>
void write_buffer(const std::vector<T>& source) {
auto p = get_data_ptr_nc<Type>();
@ -667,6 +720,31 @@ private:
}
}
template <element::Type_t Type, typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void write_buffer(const std::vector<std::string>& source) {
// Placement-constructs std::string elements from `source` into the buffer.
// NOTE(review): uninitialized_copy_n requires the target storage to be raw
// (unconstructed). The string Constant constructor allocates with
// memset_allocation == false, which matches — confirm no caller reaches this
// with already-constructed elements (that would leak them).
auto p = get_data_ptr_nc<Type>();
auto num_elements = std::min(shape_size(m_shape), source.size());
std::uninitialized_copy_n(source.begin(), num_elements, p);
}
/// \brief Fallback overload: writing std::string elements into a non-string constant is unsupported.
/// \throws ov::Exception always, naming the actual element type.
template <element::Type_t Type, typename std::enable_if<Type != element::Type_t::string, bool>::type = true>
void write_buffer(const std::vector<std::string>& source) {
    // Bug fix: a space after the colon keeps the message readable
    // ("... of type: i32" instead of "... of type:i32").
    OPENVINO_THROW("write_buffer does not support writing std::string elements into ov::Tensor of type: " +
                   ov::element::Type(Type).to_string());
}
template <element::Type_t Type,
typename T,
typename std::enable_if<Type == element::Type_t::string, bool>::type = true>
void write_buffer(const std::vector<T>& source) {
if (source.size() > 0) {
auto source_type = std::string(typeid(source[0]).name());
OPENVINO_THROW("write_buffer does not support writing elements of type " + source_type +
" into string ov::Tensor");
}
}
template <element::Type_t Type,
typename T,
typename StorageDataType = fundamental_type_for<Type>,
@ -801,6 +879,9 @@ private:
case Type_t::nf4:
write_buffer<Type_t::nf4>(source);
break;
case Type_t::string:
write_buffer<Type_t::string>(source);
break;
case element::Type_t::undefined:
case element::Type_t::dynamic:
OPENVINO_THROW("unsupported type");

View File

@ -113,7 +113,7 @@ public:
* @note Does not perform memory allocation internally
* @param type Tensor element type
* @param shape Tensor shape
* @param host_ptr Pointer to pre-allocated host memory
* @param host_ptr Pointer to pre-allocated host memory with initialized objects
* @param strides Optional strides parameters in bytes. Strides are supposed to be computed automatically based
* on shape and element size
*/
@ -130,7 +130,7 @@ public:
* @brief Constructs Tensor using port from node. Wraps allocated host memory.
* @note Does not perform memory allocation internally
* @param port port from node
* @param host_ptr Pointer to pre-allocated host memory
* @param host_ptr Pointer to pre-allocated host memory with initialized objects
* @param strides Optional strides parameters in bytes. Strides are supposed to be computed automatically based
* on shape and element size
*/

View File

@ -18,6 +18,7 @@
#include "openvino/core/type/nf4.hpp"
#include "openvino/reference/utils/type_util.hpp"
#include "openvino/runtime/shared_buffer.hpp"
#include "openvino/runtime/string_aligned_buffer.hpp"
namespace ov {
namespace op {
@ -76,6 +77,8 @@ Constant::Constant(const std::shared_ptr<ngraph::runtime::Tensor>& tensor) {
tensor->get_size_in_bytes(),
tensor);
} else {
OPENVINO_ASSERT(m_element_type != ov::element::string,
"Creation of string constant for ngraph::runtime::Tensor is supported only for HostTensor");
constructor_validate_and_infer_types();
allocate_buffer(false);
tensor->read(get_data_ptr_nc(), tensor->get_size_in_bytes());
@ -93,7 +96,32 @@ Constant::Constant(const Tensor& tensor)
}
Constant::Constant(const element::Type& type, const Shape& shape, const std::vector<std::string>& values)
: Constant(type, shape, from_string_vector(values)) {
: Constant(false, type, shape) {
NODE_VALIDATION_CHECK(this,
values.size() == 1 || values.size() == shape_size(m_shape),
"Did not get the expected number of literals for a constant of shape ",
m_shape,
" (got ",
values.size(),
", expected ",
(shape_size(m_shape) == 1 ? "" : "1 or "),
shape_size(m_shape),
").");
if (type == element::string) {
if (values.size() == 1) {
fill_data(type, values.front());
} else {
write_values(values);
}
} else {
auto parsed_values = from_string_vector(values);
if (parsed_values.size() == 1) {
fill_data(type, parsed_values.front());
} else {
write_values(parsed_values);
}
}
const auto is_checked_and_identical = (values.size() == 1) && (shape_size(m_shape) != 1);
update_identical_flags(is_checked_and_identical, is_checked_and_identical);
}
@ -108,14 +136,28 @@ Constant::Constant(bool memset_allocation, const element::Type& type, const Shap
}
void Constant::allocate_buffer(bool memset_allocation) {
m_data = std::make_shared<AlignedBuffer>(mem_size(), host_alignment());
if (memset_allocation) {
std::memset(m_data->get_ptr(), 0, m_data->size());
// memset_allocation flag is to switch on initialization of objects in memory for element::string type
// and set memory to zero for numeric element types
if (m_element_type == ov::element::string) {
auto num_elements = shape_size(m_shape);
m_data = std::make_shared<StringAlignedBuffer>(num_elements, mem_size(), host_alignment(), memset_allocation);
} else {
m_data = std::make_shared<AlignedBuffer>(mem_size(), host_alignment());
if (memset_allocation) {
std::memset(m_data->get_ptr(), 0, m_data->size());
}
}
}
Constant::Constant(const element::Type& type, const Shape& shape, const void* data) : Constant(false, type, shape) {
std::memcpy(get_data_ptr_nc(), data, mem_size());
if (m_element_type == ov::element::string) {
auto num_elements = shape_size(m_shape);
const std::string* src_strings = static_cast<const std::string*>(data);
std::string* dst_strings = static_cast<std::string*>(get_data_ptr_nc());
std::uninitialized_copy_n(src_strings, num_elements, dst_strings);
} else {
std::memcpy(get_data_ptr_nc(), data, mem_size());
}
}
Constant::Constant(const Constant& other)
@ -161,11 +203,17 @@ struct ValueToString : ov::element::NotSupported<std::string> {
static result_type visit(const Constant* const c, const size_t index) {
return std::to_string(c->get_element_value<ET>(index));
}
template <ov::element::Type_t ET,
typename std::enable_if<std::is_same<fundamental_type_for<ET>, std::string>::value>::type* = nullptr>
static result_type visit(const Constant* const c, const size_t index) {
return c->get_element_value<ET>(index);
}
};
std::string Constant::convert_value_to_string(size_t index) const {
using namespace ov::element;
return IfTypeOf<boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4>::apply<
return IfTypeOf<boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4, string>::apply<
ValueToString>(get_element_type(), this, index);
}
@ -211,12 +259,18 @@ struct ValuesToString : ov::element::NotSupported<void> {
strs.push_back(std::to_string(v));
}
}
template <ov::element::Type_t ET,
typename std::enable_if<std::is_same<fundamental_type_for<ET>, std::string>::value>::type* = nullptr>
static result_type visit(const Constant* const c, std::vector<std::string>& strs) {
strs = c->cast_vector<std::string>();
}
};
std::vector<std::string> Constant::get_value_strings() const {
std::vector<std::string> out;
using namespace ov::element;
IfTypeOf<boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4>::apply<
IfTypeOf<boolean, bf16, f16, f32, f64, i4, i8, i16, i32, i64, u1, u4, u8, u16, u32, u64, nf4, string>::apply<
ValuesToString>(get_element_type(), this, out);
return out;
}
@ -293,6 +347,9 @@ bool Constant::are_all_data_elements_bitwise_identical() const {
case element::Type_t::u64:
all_identical = test_bitwise_identical(get_data_ptr<uint64_t>(), shape_size(m_shape));
break;
case element::Type_t::string:
all_identical = test_bitwise_identical(get_data_ptr<std::string>(), shape_size(m_shape));
break;
default:
all_identical = false;
break;
@ -328,7 +385,15 @@ bool Constant::evaluate(TensorVector& outputs, const TensorVector& inputs) const
outputs.emplace_back(m_element_type, m_shape);
else
outputs[0].set_shape(m_shape);
std::memcpy(outputs[0].data(), get_data_ptr(), outputs[0].get_byte_size());
if (m_element_type == ov::element::string) {
auto num_elements = shape_size(m_shape);
const std::string* src_strings = static_cast<const std::string*>(get_data_ptr());
std::string* dst_strings = static_cast<std::string*>(outputs[0].data());
std::copy_n(src_strings, num_elements, dst_strings);
} else {
std::memcpy(outputs[0].data(), get_data_ptr(), outputs[0].get_byte_size());
}
return true;
}

View File

@ -49,9 +49,15 @@ bool Result::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
}
outputs[0].set_shape(inputs[0].get_shape());
void* output = outputs[0].data();
const void* input = inputs[0].data();
memcpy(output, input, outputs[0].get_byte_size());
if (inputs[0].get_element_type() == element::string) {
// memcpy for element::string Tensor does not work because output elements
// will refer to input string elements but they must be separate objects in memory
inputs[0].copy_to(outputs[0]);
} else {
void* output = outputs[0].data();
const void* input = inputs[0].data();
memcpy(output, input, outputs[0].get_byte_size());
}
return true;
}

View File

@ -754,6 +754,8 @@ std::string get_precision_name(const ov::element::Type& elem_type) {
return "BOOL";
case ::ov::element::Type_t::nf4:
return "NF4";
case ::ov::element::Type_t::string:
return "STRING";
default:
OPENVINO_THROW("Unsupported precision: ", elem_type);
}

View File

@ -397,6 +397,9 @@ static std::string get_value(const std::shared_ptr<ov::op::v0::Constant>& consta
case ov::element::Type_t::u64:
ss << pretty_value(constant->cast_vector<uint64_t>(max_elements), allow_obfuscate);
break;
case ov::element::Type_t::string:
ss << constant->get_output_element_type(0).get_type_name() << " value";
break;
}
const auto num_elements_in_constant = static_cast<int>(shape_size(constant->get_shape()));
if (num_elements_in_constant == 0)

View File

@ -158,9 +158,24 @@ void ITensor::copy_to(const std::shared_ptr<ov::ITensor>& dst) const {
return offset;
};
using copy_function_def = std::function<void(const uint8_t*, uint8_t*, size_t)>;
copy_function_def memcpy_based_copy = [](const uint8_t* src_data, uint8_t* dst_data, size_t bytes_size) {
memcpy(dst_data, src_data, bytes_size);
};
copy_function_def strings_copy = [](const uint8_t* src_data, uint8_t* dst_data, size_t bytes_size) {
// for string tensors, each element must be copy-constructed as a new
// std::string object; a raw memcpy is not suitable
auto dst_string = reinterpret_cast<std::string*>(dst_data);
auto src_string = reinterpret_cast<const std::string*>(src_data);
size_t num_elements_stride = bytes_size / element::string.size();
std::copy_n(src_string, num_elements_stride, dst_string);
};
copy_function_def copy_function = (get_element_type() == element::string) ? strings_copy : memcpy_based_copy;
bool finish = false;
for (size_t dst_idx = 0, src_idx = 0; !finish;) {
memcpy(dst_data + dst_idx, src_data + src_idx, src_strides[src_strides.size() - 1]);
copy_function(src_data + src_idx, dst_data + dst_idx, src_strides[src_strides.size() - 1]);
// update indexes
for (size_t i = 0; i < cur_pos.size(); i++) {
size_t inverted_idx = cur_pos.size() - i - 1;

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/string_aligned_buffer.hpp"
#include "openvino/runtime/aligned_buffer.hpp"
namespace ov {
// Constructs an aligned buffer able to hold num_elements std::string objects.
// When initialize is true, every element is default-constructed immediately;
// otherwise the storage is left raw for the caller to placement-construct.
StringAlignedBuffer::StringAlignedBuffer(size_t num_elements, size_t byte_size, size_t alignment, bool initialize)
: AlignedBuffer(byte_size, alignment),
m_num_elements(num_elements) {
// NOTE(review): the bound uses byte_size + alignment (the over-allocated raw
// block) rather than the aligned usable size; confirm byte_size alone is
// always sufficient for num_elements strings at every call site.
OPENVINO_ASSERT(sizeof(std::string) * num_elements <= byte_size + alignment,
"Allocated memory of size " + std::to_string(byte_size) + " bytes is not enough to store " +
std::to_string(num_elements) + " std::string objects");
if (initialize) {
auto strings = reinterpret_cast<std::string*>(m_aligned_buffer);
std::uninitialized_fill_n(strings, m_num_elements, std::string());
}
}
// Destroys every in-place constructed std::string element; the raw memory
// block itself is released by the AlignedBuffer base class destructor.
StringAlignedBuffer::~StringAlignedBuffer() {
    if (m_aligned_buffer == nullptr) {
        return;
    }
    auto* elements = reinterpret_cast<std::string*>(m_aligned_buffer);
    for (size_t idx = 0; idx < m_num_elements; ++idx) {
        // std::string is basic_string<char>; invoke its destructor explicitly
        elements[idx].~basic_string();
    }
}
} // namespace ov

View File

@ -71,6 +71,8 @@ inline TypeInfo get_type_info(ov::element::Type_t type) {
return {64, false, false, false, "uint64_t", "u64"};
case ov::element::Type_t::nf4:
return {4, false, false, true, "nfloat4", "nf4"};
case ov::element::Type_t::string:
return {8 * sizeof(std::string), false, false, false, "string", "string"};
default:
OPENVINO_THROW("ov::element::Type_t not supported: ", type);
}
@ -109,6 +111,8 @@ ov::element::Type type_from_string(const std::string& type) {
return ::ov::element::Type(::ov::element::Type_t::u64);
} else if (type == "boolean" || type == "BOOL") {
return ::ov::element::Type(::ov::element::Type_t::boolean);
} else if (type == "string" || type == "STRING") {
return ::ov::element::Type(::ov::element::Type_t::string);
} else if (type == "undefined" || type == "UNSPECIFIED") {
return ::ov::element::Type(::ov::element::Type_t::undefined);
} else if (type == "dynamic") {
@ -138,7 +142,8 @@ std::vector<const ov::element::Type*> ov::element::Type::get_known_types() {
&ov::element::u8,
&ov::element::u16,
&ov::element::u32,
&ov::element::u64};
&ov::element::u64,
&ov::element::string};
return rc;
}
@ -168,6 +173,7 @@ ov::element::Type::Type(size_t bitwidth,
{ov::element::Type_t::u32, {32, false, false, false, "uint32_t", "u32"}},
{ov::element::Type_t::u64, {64, false, false, false, "uint64_t", "u64"}},
{ov::element::Type_t::u4, {4, false, false, false, "uint4_t", "nf4"}},
{ov::element::Type_t::string, {8 * sizeof(std::string), false, false, false, "string", "string"}},
};
for (const auto& t : elements_map) {
const TypeInfo& info = t.second;
@ -259,6 +265,10 @@ template <>
Type from<ov::bfloat16>() {
return Type_t::bf16;
}
template <>
Type from<std::string>() {
return Type_t::string;
}
Type fundamental_type_for(const Type& type) {
switch (type) {
@ -294,6 +304,8 @@ Type fundamental_type_for(const Type& type) {
return from<element_type_traits<Type_t::u32>::value_type>();
case Type_t::u64:
return from<element_type_traits<Type_t::u64>::value_type>();
case Type_t::string:
return from<element_type_traits<Type_t::string>::value_type>();
default:
OPENVINO_THROW("Unsupported Data type: ", type);
}
@ -325,6 +337,7 @@ std::istream& ov::element::operator>>(std::istream& in, ov::element::Type& obj)
{"FP16", ov::element::f16},
{"BIN", ov::element::u1},
{"NF4", ov::element::nf4},
{"STRING", ov::element::string},
};
std::string str;
in >> str;
@ -407,6 +420,7 @@ inline size_t compiler_byte_size(ov::element::Type_t et) {
ET_CASE(u32);
ET_CASE(u64);
ET_CASE(nf4);
ET_CASE(string);
#undef ET_CASE
case ov::element::Type_t::undefined:
return 0;
@ -439,7 +453,8 @@ OPENVINO_API EnumNames<element::Type_t>& EnumNames<element::Type_t>::get() {
{"u16", element::Type_t::u16},
{"u32", element::Type_t::u32},
{"u64", element::Type_t::u64},
{"nf4", element::Type_t::nf4}});
{"nf4", element::Type_t::nf4},
{"string", element::Type_t::string}});
return enum_names;
}

View File

@ -1351,6 +1351,60 @@ TEST(constant, shared_data) {
EXPECT_EQ(p1, p2);
}
//
// string
//
// Verifies that a string Constant built from a vector of values exposes them
// consistently through get_vector, the raw data pointer, get_value_strings
// and convert_value_to_string.
TEST(constant, ov_string) {
Shape shape{4};
vector<std::string> input{"abc", "one two three", "1", "0"};
ov::op::v0::Constant c(element::string, shape, input);
auto v = c.get_vector<std::string>();
ASSERT_EQ(v.size(), shape_size(shape));
EXPECT_EQ(v[0], "abc");
EXPECT_EQ(v[1], "one two three");
EXPECT_EQ(v[2], "1");
EXPECT_EQ(v[3], "0");
// raw storage holds the same std::string objects
const std::string* p = c.get_data_ptr<std::string>();
EXPECT_EQ(p[0], "abc");
EXPECT_EQ(p[1], "one two three");
EXPECT_EQ(p[2], "1");
EXPECT_EQ(p[3], "0");
EXPECT_EQ(input, c.get_value_strings());
for (unsigned i = 0; i != input.size(); ++i) {
EXPECT_EQ(input[i], c.convert_value_to_string(i));
}
}
// Verifies that a single string value is broadcast to every element when the
// Constant shape holds more than one element.
TEST(constant, ov_string_broadcast) {
Shape shape{4};
ov::op::v0::Constant c(element::string, shape, vector<string>{"one two "});
auto v = c.get_vector<std::string>();
ASSERT_EQ(v.size(), shape_size(shape));
EXPECT_EQ(v[0], "one two ");
EXPECT_EQ(v[1], "one two ");
EXPECT_EQ(v[2], "one two ");
EXPECT_EQ(v[3], "one two ");
const std::string* p = c.get_data_ptr<std::string>();
EXPECT_EQ(p[0], "one two ");
EXPECT_EQ(p[1], "one two ");
EXPECT_EQ(p[2], "one two ");
EXPECT_EQ(p[3], "one two ");
}
// Verifies that cloning a string Constant shares the underlying buffer
// (pointer identity) instead of copying it.
// NOTE(review): the buffer is inspected through int16_t* purely for pointer
// comparison; this relies on get_data_ptr<T>() not type-checking the
// fundamental type against element::string — confirm that is intended.
TEST(constant, ov_string_shared_data) {
Shape shape{100, 200};
auto c1 = make_shared<ov::op::v0::Constant>(element::string, shape, vector<std::string>{"123"});
auto c2 = static_pointer_cast<ov::op::v0::Constant>(c1->clone_with_new_inputs({}));
const int16_t* p1 = c1->get_data_ptr<int16_t>();
const int16_t* p2 = c2->get_data_ptr<int16_t>();
EXPECT_EQ(p1, p2);
}
template <typename T1, typename T2>
::testing::AssertionResult test_convert() {
Shape shape{5};
@ -1363,6 +1417,15 @@ template <typename T1, typename T2>
return rc;
}
// Verifies that element::from<std::string>() maps to element::string and that
// cast_vector<std::string>() round-trips the stored values unchanged.
TEST(constant, convert_input_ov_string) {
Shape shape{5};
vector<std::string> expected{"1", "2", "3", "4", "5"};
auto c1 = make_shared<ov::op::v0::Constant>(ov::element::from<std::string>(), shape, expected);
vector<std::string> actual = c1->template cast_vector<std::string>();
EXPECT_EQ(actual, expected);
}
TEST(constant, convert_input) {
EXPECT_TRUE((test_convert<float, float>()));
EXPECT_TRUE((test_convert<float, double>()));
@ -1708,6 +1771,23 @@ TEST(constant, bad_get_data_ptr) {
}
}
// Verifies that typed get_data_ptr on a string Constant succeeds for the
// matching element type and raises AssertFailure for mismatched types.
TEST(constant, bad_get_data_ptr_ov_string) {
ov::op::v0::Constant c(element::string, Shape{}, vector<std::string>{"abc"});
EXPECT_EQ(*c.get_data_ptr<element::Type_t::string>(), "abc");
try {
c.get_data_ptr<element::Type_t::f64>();
FAIL() << "Bad type not detected.";
} catch (const AssertFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("get_data_ptr"));
}
try {
c.get_data_ptr<element::Type_t::i32>();
FAIL() << "Bad type not detected.";
} catch (const AssertFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("get_data_ptr"));
}
}
TEST(constant, hold_tensor) {
Shape shape{4};
void* hostDataPtr = nullptr;
@ -1721,6 +1801,19 @@ TEST(constant, hold_tensor) {
ASSERT_EQ(constDataPtr, hostDataPtr);
}
// Verifies that a Constant built from an ov::Tensor of strings keeps the
// tensor's memory alive and aliases it (no copy), even after the original
// Tensor handle goes out of scope.
TEST(constant, hold_tensor_ov_string) {
    // Fix: removed unused local `Shape shape{4};` — the tensor below defines
    // its own shape and the variable was never read.
    void* hostDataPtr = nullptr;
    std::shared_ptr<ov::op::v0::Constant> constOp;
    {
        auto tensor = ov::Tensor(element::string, Shape{1, 2, 3, 3});
        hostDataPtr = tensor.data();
        constOp = std::make_shared<ov::op::v0::Constant>(tensor);
    }
    const void* constDataPtr = constOp->get_data_ptr();
    ASSERT_EQ(constDataPtr, hostDataPtr);
}
// Test verifies 2 things:
// a) Checks that bitwise comparison happens on first call of 'get_all_data_elements_bitwise_identical'
// b) Next call of 'get_all_data_elements_bitwise_identical' takes already calculated value
@ -1820,6 +1913,29 @@ TEST(constant, cast_vector) {
}
}
// Exercises Constant::cast_vector<std::string> in four modes: default (all
// elements), partial (leading subset), over-sized request (clamped to the
// constant's size), and zero-element request (empty result).
TEST(constant, cast_vector_ov_string) {
    element::Type_t type = element::string;
    std::vector<std::string> data = {"a", "b", "c", "d", "e", "f", "g", "h"};
    std::vector<std::string> expected_partial_data = {"a", "b", "c", "d", "e", "f"};

    const auto& constant = op::v0::Constant::create(type, Shape{data.size()}, data);

    // Default call casts every element.
    const auto& default_casted = constant->cast_vector<std::string>();
    EXPECT_EQ(default_casted, data) << "Constant::cast_vector failed default casting for type " << type;

    // Requesting fewer elements returns only the leading portion.
    int64_t num_elements_for_partial_casting = static_cast<int64_t>(expected_partial_data.size());
    const auto& partially_casted = constant->cast_vector<std::string>(num_elements_for_partial_casting);
    EXPECT_EQ(partially_casted, expected_partial_data)
        << "Constant::cast_vector failed partial casting for type " << type;

    // Requesting more elements than stored is clamped to the constant's size.
    // (Fixed copy-pasted message: this branch checks OVER casting, not partial.)
    int64_t num_elements_for_over_casting = static_cast<int64_t>(data.size()) + 10;
    const auto& over_casted = constant->cast_vector<std::string>(num_elements_for_over_casting);
    EXPECT_EQ(over_casted, data) << "Constant::cast_vector failed for over casting for type " << type;

    // Zero-element request yields an empty vector.
    EXPECT_TRUE(constant->cast_vector<std::string>(0).empty())
        << "Constant::cast_vector failed empty casting for type " << type;
}
TEST(constant, get_values_as) {
ov::op::v0::Constant c(element::i64, Shape{6}, std::vector<int64_t>{2, -3, 1, 0, 1, 5});

View File

@ -24,6 +24,7 @@ TEST(element_type, from) {
EXPECT_EQ(element::from<uint16_t>(), element::u16);
EXPECT_EQ(element::from<uint32_t>(), element::u32);
EXPECT_EQ(element::from<uint64_t>(), element::u64);
EXPECT_EQ(element::from<std::string>(), element::string);
}
TEST(element_type, from_string) {
@ -66,6 +67,8 @@ TEST(element_type, from_string) {
EXPECT_EQ(element::Type("U64"), element::u64);
EXPECT_EQ(element::Type("nf4"), element::nf4);
EXPECT_EQ(element::Type("NF4"), element::nf4);
EXPECT_EQ(element::Type("string"), element::string);
EXPECT_EQ(element::Type("STRING"), element::string);
EXPECT_EQ(element::Type("undefined"), element::undefined);
EXPECT_EQ(element::Type("UNSPECIFIED"), element::undefined);

View File

@ -0,0 +1,26 @@
<?xml version="1.0"?>
<net name="Model0" version="11">
<layers>
<layer id="0" name="Parameter_1" type="Parameter" version="opset1">
<data shape="1,3" element_type="string" />
<output>
<port id="0" precision="STRING">
<dim>1</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1" name="Result_2" type="Result" version="opset1">
<input>
<port id="0" precision="STRING">
<dim>1</dim>
<dim>3</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
</edges>
<rt_info />
</net>

View File

@ -23,6 +23,8 @@
using OVTensorTest = ::testing::Test;
const size_t string_size = ov::element::string.size();
inline ov::Strides byteStrides(const ov::Strides& strides, const ov::element::Type& type) {
ov::Strides byte_strides(strides.size());
for (size_t i = 0; i < strides.size(); ++i)
@ -45,6 +47,21 @@ TEST_F(OVTensorTest, canCreateTensor) {
ASSERT_THROW(t.data<std::int32_t>(), ov::Exception);
}
// String tensor creation: element count, non-null storage, element type, shape,
// byte strides and byte size must all be consistent with the string element
// type; access through a non-string type must throw.
TEST_F(OVTensorTest, canCreateStringTensor) {
    ov::Shape shape = {4, 3, 2};
    ov::Tensor t{ov::element::string, shape};
    const std::size_t totalSize = ov::shape_size(shape);
    ASSERT_EQ(totalSize, t.get_size());
    ASSERT_NE(nullptr, t.data());
    ASSERT_EQ(ov::element::string, t.get_element_type());
    ASSERT_EQ(shape, t.get_shape());
    // Strides are reported in bytes, so they differ from the element-wise values.
    ASSERT_NE(shape, t.get_strides());
    ASSERT_EQ(byteStrides(ov::Strides({6, 2, 1}), t.get_element_type()), t.get_strides());
    ASSERT_EQ(string_size * totalSize, t.get_byte_size());
    ASSERT_THROW(t.data(ov::element::i64), ov::Exception);
    ASSERT_THROW(t.data<std::int32_t>(), ov::Exception);
}
TEST_F(OVTensorTest, createTensorFromPort) {
auto parameter1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f64, ov::Shape{1, 3, 2, 2});
auto parameter2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
@ -65,6 +82,27 @@ TEST_F(OVTensorTest, createTensorFromPort) {
EXPECT_EQ(t4.get_element_type(), parameter3->get_element_type());
}
// Creating string tensors from node output ports: static-shaped ports produce
// tensors matching the port shape (with or without preallocated host data),
// while a fully dynamic port yields an empty {0} shape.
TEST_F(OVTensorTest, createStringTensorFromPort) {
    auto parameter1 = std::make_shared<ov::op::v0::Parameter>(ov::element::string, ov::Shape{1, 3, 2, 2});
    auto parameter2 = std::make_shared<ov::op::v0::Parameter>(ov::element::string, ov::Shape{1, 3});
    auto parameter3 = std::make_shared<ov::op::v0::Parameter>(ov::element::string, ov::PartialShape::dynamic());
    std::string data[] = {"one", "two sentence", "three 3 sentence"};
    ov::Tensor t1{parameter1->output(0)};
    ov::Tensor t2{parameter2->output(0), data};
    ov::Tensor t3{parameter3->output(0)};
    ov::Tensor t4{parameter3->output(0), data};
    EXPECT_EQ(t1.get_shape(), parameter1->get_shape());
    EXPECT_EQ(t1.get_element_type(), parameter1->get_element_type());
    EXPECT_EQ(t2.get_shape(), parameter2->get_shape());
    EXPECT_EQ(t2.get_element_type(), parameter2->get_element_type());
    // Dynamic-shape ports cannot size the tensor, so the shape stays {0}.
    EXPECT_EQ(t3.get_shape(), ov::Shape{0});
    EXPECT_EQ(t3.get_element_type(), parameter3->get_element_type());
    EXPECT_EQ(t4.get_shape(), ov::Shape{0});
    EXPECT_EQ(t4.get_element_type(), parameter3->get_element_type());
}
TEST_F(OVTensorTest, canAccessF16Tensor) {
ov::Shape shape = {4, 3, 2};
ov::Tensor t{ov::element::f16, shape};
@ -76,6 +114,24 @@ TEST_F(OVTensorTest, canAccessF16Tensor) {
EXPECT_THROW(t.data<std::int16_t>(), ov::Exception);
}
// Checks that a freshly created string tensor exposes its data as std::string,
// that every element is default-constructed (empty), and that access through
// mismatched integral types throws.
TEST_F(OVTensorTest, canAccessStringTensor) {
    const ov::Shape dims = {4, 3, 2};
    ov::Tensor tensor{ov::element::string, dims};
    EXPECT_NE(nullptr, tensor.data());
    EXPECT_NO_THROW(tensor.data(ov::element::string));
    EXPECT_NO_THROW(tensor.data<std::string>());
    // Every element of a newly allocated string tensor must be an empty string.
    std::string* first = tensor.data<std::string>();
    std::string* last = first + tensor.get_size();
    for (std::string* it = first; it != last; ++it) {
        EXPECT_EQ(*it, std::string());
    }
    EXPECT_THROW(tensor.data<std::uint16_t>(), ov::Exception);
    EXPECT_THROW(tensor.data<std::int16_t>(), ov::Exception);
}
TEST_F(OVTensorTest, canAccessU8Tensor) {
ov::Shape shape = {4, 3, 2};
ov::Tensor t{ov::element::u8, shape};
@ -96,6 +152,11 @@ TEST_F(OVTensorTest, emptySize) {
ASSERT_NE(nullptr, t.data());
}
// A zero-element string tensor still reports a non-null data pointer.
TEST_F(OVTensorTest, emptySizeStringTensor) {
    ov::Tensor t(ov::element::string, {0});
    ASSERT_NE(nullptr, t.data());
}
TEST_F(OVTensorTest, operators) {
ov::Tensor t;
ASSERT_FALSE(t);
@ -110,7 +171,6 @@ public:
MOCK_METHOD(bool, is_equal, (const ov::AllocatorImpl&), (const, noexcept)); // NOLINT(readability/casting)
};
OPENVINO_SUPPRESS_DEPRECATED_START
TEST_F(OVTensorTest, canCreateTensorUsingMockAllocatorImpl) {
ov::Shape shape = {1, 2, 3};
auto allocator = std::make_shared<OVMockAllocatorImpl>();
@ -172,6 +232,22 @@ TEST_F(OVTensorTest, canAccessExternalData) {
}
}
// A string tensor wrapping external host memory must alias that memory
// (no copy) and report size, byte size and strides consistent with the
// string element type.
TEST_F(OVTensorTest, canAccessExternalDataStringTensor) {
    ov::Shape shape = {1, 1, 3};
    std::string data[] = {"one two three", "123", ""};
    ov::Tensor t{ov::element::string, shape, data};
    {
        std::string* ptr = t.data<std::string>();
        ASSERT_EQ(ptr[2], "");
        // The tensor aliases the external array rather than copying it.
        ASSERT_EQ(data, t.data(ov::element::string));
        ASSERT_EQ(data, ptr);
        ASSERT_THROW(t.data<std::int16_t>(), ov::Exception);
        ASSERT_EQ(byteStrides(ov::row_major_strides(shape), t.get_element_type()), t.get_strides());
        ASSERT_EQ(ov::shape_size(shape), t.get_size());
        ASSERT_EQ(ov::shape_size(shape) * string_size, t.get_byte_size());
    }
}
TEST_F(OVTensorTest, canAccessExternalDataWithStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
@ -184,11 +260,29 @@ TEST_F(OVTensorTest, canAccessExternalDataWithStrides) {
}
}
// External string data with custom (padded) strides: the tensor must keep the
// caller-provided strides and index through the raw array accordingly.
TEST_F(OVTensorTest, canAccessExternalDataWithStridesStringTensor) {
    ov::Shape shape = {2, 3};
    std::string data[] = {"abdcd efg hi", "01234", "xyz ", " ", "$%&%&& (*&&", "", "\n ", "\t "};
    // One extra string of padding per row: row stride = (3 + 1) elements.
    ov::Strides strides = {shape[1] * string_size + string_size, string_size};
    ov::Tensor t{ov::element::string, shape, data, strides};
    ASSERT_EQ(strides, t.get_strides());
    {
        ASSERT_EQ((ov::Shape{2, 3}), t.get_shape());
        const std::string* ptr = t.data<const std::string>();
        // With the padded row stride, element (1, 0) is flat index 4 of data[].
        ASSERT_EQ(ptr[4], "$%&%&& (*&&");
    }
}
TEST_F(OVTensorTest, cannotCreateTensorWithExternalNullptr) {
ov::Shape shape = {2, 3};
ASSERT_THROW(ov::Tensor(ov::element::f32, shape, nullptr), ov::Exception);
}
// Wrapping a null external pointer must be rejected for string tensors too.
TEST_F(OVTensorTest, cannotCreateStringTensorWithExternalNullptr) {
    ov::Shape shape = {2, 3};
    ASSERT_THROW(ov::Tensor(ov::element::string, shape, nullptr), ov::Exception);
}
TEST_F(OVTensorTest, cannotCreateTensorWithWrongStrides) {
ov::Shape shape = {2, 3};
float data[] = {5.f, 6.f, 7.f, 0.f, 1.f, 42.f, 3.f, 0.f};
@ -212,6 +306,29 @@ TEST_F(OVTensorTest, cannotCreateTensorWithWrongStrides) {
}
}
// Invalid stride configurations for an external string tensor must throw:
// wrong rank, strides smaller than the row-major minimum, or byte strides not
// divisible by the string element size.
TEST_F(OVTensorTest, cannotCreateStringTensorWithWrongStrides) {
    ov::Shape shape = {2, 3};
    std::string data[] = {"abdcd efg hi", "01234", "xyz ", " ", "$%&%&& (*&&", "", "\n ", "\t "};
    const auto el = ov::element::string;
    {
        // strides.size() != shape.size()
        EXPECT_THROW(ov::Tensor(el, shape, data, byteStrides({6, 3, 1}, el)), ov::Exception);
    }
    {
        // strides values are element-wise >= ov::row_major_strides(shape) values
        EXPECT_THROW(ov::Tensor(el, shape, data, byteStrides({2, 1}, el)), ov::Exception);
        EXPECT_THROW(ov::Tensor(el, shape, data, byteStrides({3, 0}, el)), ov::Exception);
        EXPECT_THROW(ov::Tensor(el, shape, data, byteStrides({3, 2}, el)), ov::Exception);
        EXPECT_NO_THROW(ov::Tensor(el, shape, data, byteStrides({6, 2}, el)));
    }
    {
        // strides are not divisible by elem_size
        EXPECT_THROW(ov::Tensor(el, shape, data, {43, el.size()}), ov::Exception);
        EXPECT_THROW(ov::Tensor(el, shape, data, {3, 0}), ov::Exception);
        EXPECT_THROW(ov::Tensor(el, shape, data, {el.size(), 61}), ov::Exception);
    }
}
TEST_F(OVTensorTest, saveDimsAndSizeAfterMove) {
ov::Shape shape = {1, 2, 3};
ov::Tensor t{ov::element::f32, shape};
@ -232,6 +349,26 @@ TEST_F(OVTensorTest, saveDimsAndSizeAfterMove) {
ASSERT_THROW(t.data<float>(), ov::Exception);
}
// Move semantics for string tensors: the destination keeps shape, type and
// strides; every accessor on the moved-from tensor must throw.
TEST_F(OVTensorTest, saveDimsAndSizeAfterMoveStringTensor) {
    ov::Shape shape = {1, 2, 3};
    ov::Tensor t{ov::element::string, shape};
    ov::Tensor new_tensor(std::move(t));
    ASSERT_EQ(shape, new_tensor.get_shape());
    ASSERT_EQ(ov::element::string, new_tensor.get_element_type());
    ASSERT_EQ(byteStrides(ov::row_major_strides(shape), new_tensor.get_element_type()), new_tensor.get_strides());
    // The moved-from tensor is empty: all queries must fail.
    ASSERT_THROW(t.get_size(), ov::Exception);
    ASSERT_THROW(t.get_element_type(), ov::Exception);
    ASSERT_THROW(t.get_byte_size(), ov::Exception);
    ASSERT_THROW(t.get_strides(), ov::Exception);
    ASSERT_THROW(t.get_shape(), ov::Exception);
    ASSERT_THROW(t.set_shape({}), ov::Exception);
    ASSERT_THROW(t.data(), ov::Exception);
    ASSERT_THROW(t.data<std::string>(), ov::Exception);
}
// SetShape
TEST_F(OVTensorTest, canSetShape) {
const ov::Shape origShape({1, 2, 3});
@ -262,6 +399,36 @@ TEST_F(OVTensorTest, canSetShape) {
}
}
// set_shape on a string tensor: growing reallocates (pointer changes), copies
// share storage so reshaping a copy reshapes the original, and shrinking back
// reuses the existing allocation.
TEST_F(OVTensorTest, canSetShapeStringTensor) {
    const ov::Shape origShape({1, 2, 3});
    ov::Tensor t{ov::element::string, {1, 2, 3}};
    const ov::Shape newShape({4, 5, 6});
    const void* orig_data = t.data();
    ASSERT_EQ(t.get_shape(), origShape);
    ASSERT_NO_THROW(t.set_shape(newShape));
    ASSERT_EQ(newShape, t.get_shape());
    ASSERT_EQ(byteStrides(ov::row_major_strides(newShape), t.get_element_type()), t.get_strides());
    // Growing past the original capacity forces a reallocation.
    ASSERT_NE(orig_data, t.data());
    const void* new_data = t.data();
    // check that setShape for copy changes original Tensor
    {
        ov::Tensor t2 = t;
        ASSERT_NO_THROW(t2.set_shape(origShape));
        ASSERT_EQ(origShape, t2.get_shape());
        ASSERT_EQ(origShape, t.get_shape());
        ASSERT_EQ(t2.data(), t.data());
    }
    // set_shape for smaller memory - does not perform reallocation
    {
        ASSERT_NO_THROW(t.set_shape(origShape));
        ASSERT_EQ(origShape, t.get_shape());
        ASSERT_EQ(new_data, t.data());
    }
}
TEST_F(OVTensorTest, cannotSetShapeOfBiggerSizeOnPreallocatedMemory) {
float data[4 * 5 * 6 * 2];
ov::Tensor t{ov::element::f32, {1, 2, 3}, data};
@ -270,6 +437,14 @@ TEST_F(OVTensorTest, cannotSetShapeOfBiggerSizeOnPreallocatedMemory) {
ASSERT_THROW(t.set_shape(newShape), ov::Exception);
}
// Preallocated external string memory cannot be grown beyond its size.
TEST_F(OVTensorTest, cannotSetShapeOfBiggerSizeOnPreallocatedMemoryStringTensor) {
    std::string data[4 * 5 * 6];
    ov::Tensor t{ov::element::string, {1, 2, 3}, data};
    const ov::Shape newShape({4, 5, 6});
    ASSERT_THROW(t.set_shape(newShape), ov::Exception);
}
TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemory) {
float data[4 * 5 * 6 * 2];
ov::Tensor t{ov::element::f32, {4, 5, 6}, data};
@ -278,6 +453,14 @@ TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemory) {
ASSERT_NO_THROW(t.set_shape(newShape));
}
// Shrinking a tensor over preallocated external string memory is allowed.
TEST_F(OVTensorTest, canSetShapeOfSmallerSizeOnPreallocatedMemoryStringTensor) {
    std::string data[4 * 5 * 6];
    ov::Tensor t{ov::element::string, {4, 5, 6}, data};
    const ov::Shape newShape({1, 2, 3});
    ASSERT_NO_THROW(t.set_shape(newShape));
}
TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemory) {
float data[4 * 5 * 6 * 2];
ov::Tensor t{ov::element::f32, {4, 5, 6}, data};
@ -286,6 +469,14 @@ TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemory) {
ASSERT_NO_THROW(t.set_shape(newShape));
}
// Re-applying the same shape over preallocated string memory is a no-op success.
TEST_F(OVTensorTest, canSetShapeOfSameSizeOnPreallocatedMemoryStringTensor) {
    std::string data[4 * 5 * 6];
    ov::Tensor t{ov::element::string, {4, 5, 6}, data};
    const ov::Shape newShape({4, 5, 6});
    ASSERT_NO_THROW(t.set_shape(newShape));
}
TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemory) {
float data[4 * 5 * 6 * 2];
ov::Tensor t{ov::element::f32, {4, 5, 6}, data};
@ -296,6 +487,16 @@ TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemor
ASSERT_NO_THROW(t.set_shape(originalShape));
}
// After shrinking over preallocated string memory, the original (full) shape
// can be restored since the backing buffer was never released.
TEST_F(OVTensorTest, canSetShapeOfOriginalSizeAfterDecreasingOnPreallocatedMemoryStringTensor) {
    std::string data[4 * 5 * 6];
    ov::Tensor t{ov::element::string, {4, 5, 6}, data};
    const ov::Shape smallerShape({1, 2, 3});
    const ov::Shape originalShape({4, 5, 6});
    ASSERT_NO_THROW(t.set_shape(smallerShape));
    ASSERT_NO_THROW(t.set_shape(originalShape));
}
TEST_F(OVTensorTest, canChangeShapeOnStridedTensor) {
float data[64 * 4];
ov::Tensor t{ov::element::f32, {4, 2, 2}, data, {64, 16, 4}};
@ -306,6 +507,16 @@ TEST_F(OVTensorTest, canChangeShapeOnStridedTensor) {
ASSERT_NO_THROW(t.set_shape(correct_shape));
}
// set_shape on a strided string tensor: a shape that does not fit the custom
// strides throws, while a smaller compatible shape succeeds.
TEST_F(OVTensorTest, canChangeShapeOnStridedTensorStringTensor) {
    std::string data[64 * 4];
    ov::Tensor t{ov::element::string, {4, 2, 2}, data, {8 * string_size, 3 * string_size, string_size}};
    const ov::Shape incorrect_shape({2, 2, 4});
    const ov::Shape correct_shape({1, 1, 2});
    ASSERT_THROW(t.set_shape(incorrect_shape), ov::Exception);
    ASSERT_NO_THROW(t.set_shape(correct_shape));
}
TEST_F(OVTensorTest, makeRangeRoiTensor) {
ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; // RGBp picture of size (WxH) = 5x6
ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
@ -321,6 +532,21 @@ TEST_F(OVTensorTest, makeRangeRoiTensor) {
ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
}
// ROI view over a string tensor: the view keeps the parent's strides, its data
// pointer is offset by the ROI origin (in elements and in bytes), and its
// element type matches the parent.
TEST_F(OVTensorTest, makeRangeRoiStringTensor) {
    ov::Tensor t{ov::element::string, {1, 3, 6, 5}};  // RGBp picture of size (WxH) = 5x6
    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
    ov::Shape ref_shape = {1, 3, 4, 2};
    // ROI origin {0, 0, 1, 2} in a {1, 3, 6, 5} tensor: 1 * 5 + 2 = 7 elements.
    ptrdiff_t ref_offset_elems = 7;
    ptrdiff_t ref_offset_bytes = ref_offset_elems * string_size;
    ov::Strides ref_strides = {90, 30, 5, 1};
    ASSERT_EQ(roi_tensor.get_shape(), ref_shape);
    ASSERT_EQ(roi_tensor.data<std::string>() - t.data<std::string>(), ref_offset_elems);
    ASSERT_EQ(reinterpret_cast<uint8_t*>(roi_tensor.data()) - reinterpret_cast<uint8_t*>(t.data()), ref_offset_bytes);
    // The ROI is a view: it inherits the parent's (row-major) strides.
    ASSERT_EQ(roi_tensor.get_strides(), t.get_strides());
    ASSERT_EQ(byteStrides(ref_strides, roi_tensor.get_element_type()), roi_tensor.get_strides());
    ASSERT_EQ(roi_tensor.get_element_type(), t.get_element_type());
}
TEST_F(OVTensorTest, cannotSetShapeOnRoiTensor) {
ov::Tensor t{ov::element::i32, {1, 3, 6, 5}}; // RGBp picture of size (WxH) = 5x6
ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
@ -329,6 +555,14 @@ TEST_F(OVTensorTest, cannotSetShapeOnRoiTensor) {
ASSERT_THROW(roi_tensor.set_shape(newShape), ov::Exception);
}
// ROI views over string tensors cannot be reshaped.
TEST_F(OVTensorTest, cannotSetShapeOnRoiStringTensor) {
    ov::Tensor t{ov::element::string, {1, 3, 6, 5}};  // RGBp picture of size (WxH) = 5x6
    ov::Tensor roi_tensor{t, {0, 0, 1, 2}, {1, 3, 5, 4}};
    const ov::Shape newShape({4, 5, 6});
    ASSERT_THROW(roi_tensor.set_shape(newShape), ov::Exception);
}
TEST_F(OVTensorTest, tensorInt4DataAccess) {
ov::Tensor t{ov::element::i4, {1, 6, 5, 3}}; // RGB picture of size (WxH) = 5x6
ASSERT_THROW((ov::Tensor{t, {0, 1, 2, 0}, {1, 5, 4, 3}}), ov::Exception);
@ -368,6 +602,31 @@ TEST_F(OVTensorTest, readRangeRoiBlob) {
}
}
// Reads a string ROI view and checks, element by element, that each ROI address
// maps to the expected element of the parent tensor once the ROI origin offset
// {0, 0, 2, 4} is applied.
TEST_F(OVTensorTest, readRangeRoiBlobStringTensor) {
    ov::Tensor t{ov::element::string, {1, 3, 4, 8}};
    {
        // Fill the parent with distinct values ("0", "1", ...) so that address
        // mismatches would be observable.
        const auto origPtr = t.data<std::string>();
        ASSERT_NE(nullptr, origPtr);
        for (size_t i = 0; i < t.get_size(); ++i) {
            origPtr[i] = std::to_string(i);
        }
    }
    ov::Tensor roi_tensor{t, {0, 0, 2, 4}, {1, 3, 4, 8}};
    ASSERT_NE(false, static_cast<bool>(roi_tensor));
    {
        const std::uint8_t* roi = static_cast<const std::uint8_t*>(roi_tensor.data());
        ASSERT_NE(nullptr, roi);
        auto strides = roi_tensor.get_strides();
        for (auto&& c : ov::CoordinateTransformBasic{roi_tensor.get_shape()}) {
            // ROI-side address computed in bytes; parent-side address computed
            // in elements (byte offset divided by the string element size) and
            // shifted by the ROI origin {0, 0, 2, 4}.
            auto actual_addr = roi + c[3] * strides[3] + c[2] * strides[2] + c[1] * strides[1] + c[0] * strides[0];
            auto expected_addr = t.data<std::string>() + ((c[3] + 4) * strides[3] + (c[2] + 2) * strides[2] +
                                                          (c[1] + 0) * strides[1] + (c[0] + 0) * strides[0]) /
                                                             t.get_element_type().size();
            ASSERT_EQ(actual_addr, static_cast<uint8_t*>(static_cast<void*>(expected_addr)));
        }
    }
}
struct TestParams {
ov::Shape src_shape;
ov::Strides src_strides;
@ -403,7 +662,9 @@ void compare_data(const ov::Tensor& src, const ov::Tensor& dst) {
}
};
template <class T>
template <ov::element::Type_t ET,
typename T = typename ov::element_type_traits<ET>::value_type,
typename std::enable_if<ET != ov::element::Type_t::string, bool>::type = true>
void init_tensor(const ov::Tensor& tensor, bool input) {
const auto origPtr = tensor.data<T>();
ASSERT_NE(nullptr, origPtr);
@ -412,43 +673,57 @@ void init_tensor(const ov::Tensor& tensor, bool input) {
}
}
// init_tensor overload selected for string tensors (ET == string): fills each
// element with the decimal representation of its flat index.
// NOTE(review): the `input` flag is unused in this overload — presumably the
// numeric overload distinguishes input/output initialization; confirm there.
template <ov::element::Type_t ET,
          typename T = typename ov::element_type_traits<ET>::value_type,
          typename std::enable_if<ET == ov::element::Type_t::string, bool>::type = true>
void init_tensor(const ov::Tensor& tensor, bool input) {
    const auto origPtr = tensor.data<T>();
    ASSERT_NE(nullptr, origPtr);
    for (size_t i = 0; i < tensor.get_size(); ++i) {
        origPtr[i] = std::to_string(i);
    }
}
void init_tensor(const ov::Tensor& tensor, bool input) {
switch (tensor.get_element_type()) {
case ov::element::bf16:
init_tensor<ov::element_type_traits<ov::element::bf16>::value_type>(tensor, input);
init_tensor<ov::element::bf16>(tensor, input);
break;
case ov::element::f16:
init_tensor<ov::element_type_traits<ov::element::f16>::value_type>(tensor, input);
init_tensor<ov::element::f16>(tensor, input);
break;
case ov::element::f32:
init_tensor<ov::element_type_traits<ov::element::f32>::value_type>(tensor, input);
init_tensor<ov::element::f32>(tensor, input);
break;
case ov::element::f64:
init_tensor<ov::element_type_traits<ov::element::f64>::value_type>(tensor, input);
init_tensor<ov::element::f64>(tensor, input);
break;
case ov::element::i8:
init_tensor<ov::element_type_traits<ov::element::i8>::value_type>(tensor, input);
init_tensor<ov::element::i8>(tensor, input);
break;
case ov::element::i16:
init_tensor<ov::element_type_traits<ov::element::i16>::value_type>(tensor, input);
init_tensor<ov::element::i16>(tensor, input);
break;
case ov::element::i32:
init_tensor<ov::element_type_traits<ov::element::i32>::value_type>(tensor, input);
init_tensor<ov::element::i32>(tensor, input);
break;
case ov::element::i64:
init_tensor<ov::element_type_traits<ov::element::i64>::value_type>(tensor, input);
init_tensor<ov::element::i64>(tensor, input);
break;
case ov::element::u8:
init_tensor<ov::element_type_traits<ov::element::u8>::value_type>(tensor, input);
init_tensor<ov::element::u8>(tensor, input);
break;
case ov::element::u16:
init_tensor<ov::element_type_traits<ov::element::u16>::value_type>(tensor, input);
init_tensor<ov::element::u16>(tensor, input);
break;
case ov::element::u32:
init_tensor<ov::element_type_traits<ov::element::u32>::value_type>(tensor, input);
init_tensor<ov::element::u32>(tensor, input);
break;
case ov::element::u64:
init_tensor<ov::element_type_traits<ov::element::u64>::value_type>(tensor, input);
init_tensor<ov::element::u64>(tensor, input);
break;
case ov::element::string:
init_tensor<ov::element::string>(tensor, input);
break;
default:
OPENVINO_THROW("Unsupported data type");
@ -496,6 +771,9 @@ void compare_tensors(const ov::Tensor& src, const ov::Tensor& dst) {
case ov::element::u64:
compare_data<ov::element_type_traits<ov::element::u64>::value_type>(src, dst);
break;
case ov::element::string:
compare_data<ov::element_type_traits<ov::element::string>::value_type>(src, dst);
break;
default:
OPENVINO_THROW("Unsupported data type");
}
@ -578,4 +856,38 @@ INSTANTIATE_TEST_SUITE_P(copy_tests,
{1}, {}
}
)));
// Copy-test parameterizations for ov::element::string tensors: contiguous and
// strided source/destination combinations (strides given in bytes, i.e.
// multiples of string_size), plus scalar and single-element edge cases.
INSTANTIATE_TEST_SUITE_P(copy_tests_strings,
                         OVTensorTestCopy,
                         ::testing::Combine(::testing::Values(ov::element::string),
                                            ::testing::Values(
                                                TestParams {
                                                    ov::Shape{1, 3, 4, 8}, {},
                                                    {0}, {}
                                                },
                                                TestParams {
                                                    ov::Shape{3, 2, 2}, {},
                                                    ov::Shape{3, 2, 2}, ov::Strides{16 * string_size, 3 * string_size, string_size}
                                                },
                                                TestParams {
                                                    ov::Shape{3, 2, 2}, ov::Strides{8 * string_size, 2 * string_size, string_size},
                                                    ov::Shape{3, 2, 2}, ov::Strides{}
                                                },
                                                TestParams {
                                                    ov::Shape{3, 2, 2}, ov::Strides{8 * string_size, 2 * string_size, string_size},
                                                    ov::Shape{3, 2, 2}, ov::Strides{16 * string_size, 3 * string_size, string_size}
                                                },
                                                TestParams {
                                                    ov::Shape{}, {},
                                                    {}, {}
                                                },
                                                TestParams {
                                                    ov::Shape{1}, {},
                                                    {}, {}
                                                },
                                                TestParams {
                                                    ov::Shape{}, {},
                                                    {1}, {}
                                                }
                                            )));
// clang-format on

View File

@ -98,7 +98,8 @@ INSTANTIATE_TEST_SUITE_P(
std::make_tuple("loop_2d_add.xml", "loop_2d_add.bin"),
std::make_tuple("nms5_dynamism.xml", "nms5_dynamism.bin"),
std::make_tuple("if_diff_case.xml", "if_diff_case.bin"),
std::make_tuple("if_body_without_parameters.xml", "if_body_without_parameters.bin")));
std::make_tuple("if_body_without_parameters.xml", "if_body_without_parameters.bin"),
std::make_tuple("string_parameter.xml", "string_parameter.bin")));
#ifdef ENABLE_OV_ONNX_FRONTEND

View File

@ -34,6 +34,13 @@ TEST(type_prop, tensor_constant_deduce_bool) {
ASSERT_EQ(c->get_shape(), (ov::Shape{2, 2}));
}
// Type propagation: a Constant built from string values reports the string
// element type and the requested shape.
TEST(type_prop, tensor_constant_deduce_string) {
    const ov::Shape expected_shape{2, 2};
    const vector<std::string> values{"1", "2", "3", "4"};
    auto constant = ov::op::v0::Constant::create(ov::element::string, expected_shape, values);
    ASSERT_EQ(constant->get_element_type(), ov::element::string);
    ASSERT_EQ(constant->get_shape(), expected_shape);
}
TEST(type_prop, tensor_constant_bad_count) {
try {
auto c = ov::op::v0::Constant::create(ov::element::boolean, ov::Shape{2, 2}, {1, 1, 1});
@ -53,3 +60,10 @@ TEST(type_prop, constant_zero_elements_one_string) {
ASSERT_EQ(c->get_element_type(), ov::element::i64);
ASSERT_EQ(c->get_shape(), (ov::Shape{2, 0, 2, 2}));
}
// A string Constant with zero elements (shape {2, 0, 2, 2}) can still be
// constructed from a single value, mirroring constant_zero_elements_one_string.
TEST(type_prop, constant_zero_elements_ov_string) {
    auto c =
        make_shared<ov::op::v0::Constant>(ov::element::string, ov::Shape{2, 0, 2, 2}, std::vector<std::string>{"42"});
    ASSERT_EQ(c->get_element_type(), ov::element::string);
    ASSERT_EQ(c->get_shape(), (ov::Shape{2, 0, 2, 2}));
}

View File

@ -84,3 +84,67 @@ TEST(attributes, constant_op_from_host_tensor_identical_elements) {
EXPECT_EQ(data, g_data);
ASSERT_TRUE(g_k->get_all_data_elements_bitwise_identical());
}
// TODO: implement (de)serialization string constants, then enable this test.
// Round-trips a string Constant with distinct elements through NodeBuilder and
// checks type, shape, values, and that the elements are NOT bitwise identical.
TEST(attributes, DISABLED_constant_op_string) {
    vector<std::string> data{"abc", "de fc qq", "", "123 abc", "0112 3 ", "&&&"};
    auto k = make_shared<op::v0::Constant>(element::string, Shape{2, 3}, data);
    NodeBuilder builder(k);
    auto g_k = ov::as_type_ptr<op::v0::Constant>(builder.create());
    g_k->validate_and_infer_types();
    ASSERT_TRUE(g_k);
    EXPECT_EQ(k->get_element_type(), g_k->get_element_type());
    EXPECT_EQ(k->get_shape(), g_k->get_shape());
    vector<std::string> g_data = g_k->get_vector<std::string>();
    EXPECT_EQ(data, g_data);
    ASSERT_FALSE(g_k->get_all_data_elements_bitwise_identical());
}
// TODO: implement (de)serialization string constants, then enable this test.
// Same round-trip as above, but with identical elements so the bitwise-identical
// flag must be true after deserialization.
TEST(attributes, DISABLED_constant_op_identical_elements_string) {
    vector<std::string> data{"abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg"};
    auto k = make_shared<op::v0::Constant>(element::string, Shape{2, 3}, data);
    NodeBuilder builder(k);
    auto g_k = ov::as_type_ptr<op::v0::Constant>(builder.create());
    g_k->validate_and_infer_types();
    ASSERT_TRUE(g_k);
    EXPECT_EQ(k->get_element_type(), g_k->get_element_type());
    EXPECT_EQ(k->get_shape(), g_k->get_shape());
    vector<std::string> g_data = g_k->get_vector<std::string>();
    EXPECT_EQ(data, g_data);
    ASSERT_TRUE(g_k->get_all_data_elements_bitwise_identical());
}
// TODO: implement (de)serialization string constants, then enable this test.
// Builds the Constant from an ov::Tensor wrapping host string data (distinct
// elements) and round-trips it through NodeBuilder.
TEST(attributes, DISABLED_constant_op_from_host_tensor_different_elements_string) {
    vector<std::string> data{"abc", "de fc qq", "", "123 abc", "0112 3 ", "&&&"};
    auto tensor = ov::Tensor(element::string, Shape{2, 3}, &data[0]);
    auto k = make_shared<op::v0::Constant>(tensor);
    ASSERT_FALSE(k->get_all_data_elements_bitwise_identical());
    NodeBuilder builder(k);
    auto g_k = ov::as_type_ptr<op::v0::Constant>(builder.create());
    g_k->validate_and_infer_types();
    ASSERT_TRUE(g_k);
    EXPECT_EQ(k->get_element_type(), g_k->get_element_type());
    EXPECT_EQ(k->get_shape(), g_k->get_shape());
    vector<std::string> g_data = g_k->get_vector<std::string>();
    EXPECT_EQ(data, g_data);
    ASSERT_FALSE(g_k->get_all_data_elements_bitwise_identical());
}
// TODO: implement (de)serialization string constants, then enable this test.
// Same host-tensor round-trip as above, but with identical elements so the
// bitwise-identical flag must hold both before and after serialization.
TEST(attributes, DISABLED_constant_op_from_host_tensor_identical_elements_string) {
    vector<std::string> data{"abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg"};
    auto tensor = ov::Tensor(element::string, Shape{2, 3}, &data[0]);
    auto k = make_shared<op::v0::Constant>(tensor);
    ASSERT_TRUE(k->get_all_data_elements_bitwise_identical());
    NodeBuilder builder(k);
    auto g_k = ov::as_type_ptr<op::v0::Constant>(builder.create());
    g_k->validate_and_infer_types();
    ASSERT_TRUE(g_k);
    EXPECT_EQ(k->get_element_type(), g_k->get_element_type());
    EXPECT_EQ(k->get_shape(), g_k->get_shape());
    vector<std::string> g_data = g_k->get_vector<std::string>();
    EXPECT_EQ(data, g_data);
    ASSERT_TRUE(g_k->get_all_data_elements_bitwise_identical());
}

View File

@ -63,7 +63,7 @@ void copy_conditional_flow_markers_for_producer(
}
template <typename T>
void extract_tensor_content(const string& tensor_content, Tensor* values) {
void extract_tensor_content(const std::string& tensor_content, Tensor* values) {
const auto tensor_content_size = tensor_content.size();
FRONT_END_GENERAL_CHECK(tensor_content_size % sizeof(T) == 0,
"Size of tensor_content (",
@ -185,7 +185,7 @@ Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto,
ov_type.is_static(),
"Encountered unknown element type " + DataType_Name(tensor_type) + " on an empty tensor_proto");
} else {
auto data = vector<string>();
auto data = vector<std::string>();
for (const auto& item : tensor_proto.string_val()) {
data.push_back(item);
}

View File

@ -19,8 +19,8 @@ TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelProgramOnly) {
model = convert_model("saved_model_program-only");
// check tensor names in the resulted model
unordered_set<string> input_tensor_names = {"y"};
unordered_set<string> output_tensor_names = {"z"};
unordered_set<std::string> input_tensor_names = {"y"};
unordered_set<std::string> output_tensor_names = {"z"};
ASSERT_EQ(model->get_results().size(), 1);
ASSERT_TRUE(model->get_results()[0]->input_value(0).get_names() == output_tensor_names);
ASSERT_EQ(model->get_parameters().size(), 1);
@ -61,9 +61,9 @@ TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelWithInputIntegerType) {
{PartialShape{10, 5}, PartialShape{3}});
// check tensor names in the resulted model
unordered_set<string> input_tensor_name1 = {"params"};
unordered_set<string> input_tensor_name2 = {"indices"};
unordered_set<string> output_tensor_names = {"test_output_name"};
unordered_set<std::string> input_tensor_name1 = {"params"};
unordered_set<std::string> input_tensor_name2 = {"indices"};
unordered_set<std::string> output_tensor_names = {"test_output_name"};
ASSERT_EQ(model->get_results().size(), 1);
ASSERT_TRUE(model->get_results()[0]->input_value(0).get_names() == output_tensor_names);
ASSERT_EQ(model->get_parameters().size(), 2);
@ -97,7 +97,7 @@ TEST_F(FrontEndConversionWithReferenceTestsF, SavedModelMultipleTensorNames) {
model = convert_model("saved_model_parameter_result");
// check tensor names in the resulted model
unordered_set<string> tensor_names = {"params", "test_output_name"};
unordered_set<std::string> tensor_names = {"params", "test_output_name"};
ASSERT_EQ(model->get_results().size(), 1);
ASSERT_TRUE(model->get_results()[0]->input_value(0).get_names() == tensor_names);
ASSERT_EQ(model->get_parameters().size(), 1);

View File

@ -55,13 +55,13 @@ private:
const std::string m_op_type;
};
shared_ptr<Model> convert_model_partially(const string& model_path) {
shared_ptr<Model> convert_model_partially(const std::string& model_path) {
FrontEndManager fem;
auto front_end = fem.load_by_framework(TF_FE);
if (!front_end) {
throw "TensorFlow Frontend is not initialized";
}
auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) + model_path);
auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) + model_path);
auto input_model = front_end->load(model_filename);
if (!input_model) {
throw "Input model is not read";
@ -95,8 +95,8 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) {
InputModel::Ptr inputModel;
ASSERT_NO_THROW(frontEnd = fem.load_by_framework(TF_FE));
ASSERT_NE(frontEnd, nullptr);
auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) +
string("relu_unsupported/relu_unsupported.pb"));
auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) +
std::string("relu_unsupported/relu_unsupported.pb"));
ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename));
ASSERT_NE(inputModel, nullptr);
shared_ptr<ov::Model> model;
@ -141,13 +141,13 @@ TEST(FrontEndConvertModelTest, test_unsupported_tf1_while_and_incorrect_less_tra
"passed without errors. "
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Less expects ten inputs.\n";
string not_found_message =
std::string error_message = error.what();
std::string ref_message = "Less expects ten inputs.\n";
std::string not_found_message =
"[TensorFlow Frontend] Internal error, no translator found for operation(s): Enter, Exit, "
"LoopCond, Merge, NextIteration, Switch";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(not_found_message) == string::npos);
ASSERT_TRUE(error_message.find(ref_message) != std::string::npos);
ASSERT_TRUE(error_message.find(not_found_message) == std::string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason.";
@ -164,13 +164,13 @@ TEST(FrontEndConvertModelTest, conversion_with_unknown_exception) {
"passed without errors. "
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Unknown exception type\n";
string doc_message =
std::string error_message = error.what();
std::string ref_message = "Unknown exception type\n";
std::string doc_message =
"To facilitate the conversion of unsupported operations, refer to Frontend Extension documentation: "
"https://docs.openvino.ai/latest/openvino_docs_Extensibility_UG_Frontend_Extensions.html";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(doc_message) == string::npos);
ASSERT_TRUE(error_message.find(ref_message) != std::string::npos);
ASSERT_TRUE(error_message.find(doc_message) == std::string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of TensorFlow 1 While failed by wrong reason.";
@ -187,11 +187,11 @@ TEST(FrontEndConvertModelTest, test_unsupported_resource_gather_translator) {
"ResourceGather translator. "
"OpConversionFailure is expected.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Less expects ten inputs.\n";
string no_ref_message = "[TensorFlow Frontend] Internal error: No translator found for";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(no_ref_message) == string::npos);
std::string error_message = error.what();
std::string ref_message = "Less expects ten inputs.\n";
std::string no_ref_message = "[TensorFlow Frontend] Internal error: No translator found for";
ASSERT_TRUE(error_message.find(ref_message) != std::string::npos);
ASSERT_TRUE(error_message.find(no_ref_message) == std::string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of the model with ResourceGather failed by wrong reason.";
@ -204,10 +204,10 @@ TEST(FrontEndConvertModelTest, test_unsupported_operation_conversion_with_reason
model = convert_model("gather_with_string_table/gather_with_string_table.pb");
FAIL() << "The model with Const of string type must not be converted.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message =
std::string error_message = error.what();
std::string ref_message =
"[TensorFlow Frontend] Internal error, no translator found for operation(s): Const of string type";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
ASSERT_TRUE(error_message.find(ref_message) != std::string::npos);
ASSERT_EQ(model, nullptr);
} catch (...) {
FAIL() << "Conversion of the model with Const of string type failed by wrong reason.";

View File

@ -75,8 +75,8 @@ TEST(TFTelemetryTest, test_nonexistent_add) {
m_test_telemetry.clear();
EXPECT_NO_THROW(frontEnd->add_extension(telemetry_extension));
auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) +
string("nonexistent_add/nonexistent_add.pb"));
auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) +
std::string("nonexistent_add/nonexistent_add.pb"));
ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename));
ASSERT_NE(inputModel, nullptr);
shared_ptr<ov::Model> model;
@ -85,9 +85,9 @@ TEST(TFTelemetryTest, test_nonexistent_add) {
model = frontEnd->convert(inputModel);
FAIL() << "Non-existent operation Adddd must not be supported by TF FE.";
} catch (const OpConversionFailure& error) {
string error_message = error.what();
string ref_message = "Internal error, no translator found for operation(s): Adddd";
ASSERT_TRUE(error_message.find(ref_message) != string::npos);
std::string error_message = error.what();
std::string ref_message = "Internal error, no translator found for operation(s): Adddd";
ASSERT_TRUE(error_message.find(ref_message) != std::string::npos);
ASSERT_EQ(model, nullptr);
// check telemetry data

View File

@ -18,9 +18,9 @@ namespace tests {
const std::string TF_FE = "tf";
shared_ptr<Model> convert_model(const string& model_path,
shared_ptr<Model> convert_model(const std::string& model_path,
const ConversionExtension::Ptr& conv_ext,
const vector<string>& input_names,
const vector<std::string>& input_names,
const vector<element::Type>& input_types,
const vector<PartialShape>& input_shapes,
const std::vector<std::string>& input_names_to_freeze,
@ -34,7 +34,7 @@ shared_ptr<Model> convert_model(const string& model_path,
if (conv_ext) {
front_end->add_extension(conv_ext);
}
auto model_filename = FrontEndTestUtils::make_model_path(string(TEST_TENSORFLOW_MODELS_DIRNAME) + model_path);
auto model_filename = FrontEndTestUtils::make_model_path(std::string(TEST_TENSORFLOW_MODELS_DIRNAME) + model_path);
ov::frontend::InputModel::Ptr input_model;
if (!disable_mmap) {
input_model = front_end->load(model_filename);

View File

@ -127,6 +127,7 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p
USE_FACTORY(BIN);
USE_FACTORY(BF16);
USE_FACTORY(BOOL);
USE_FACTORY(STRING);
default:
IE_THROW() << "cannot locate blob for precision: " << precision;
}

View File

@ -55,6 +55,8 @@ INFERENCE_ENGINE_1_0_DEPRECATED inline ::ngraph::element::Type convertPrecision(
return ::ngraph::element::Type(::ngraph::element::Type_t::u1);
case Precision::NF4:
return ::ngraph::element::Type(::ngraph::element::Type_t::nf4);
case Precision::STRING:
return ::ngraph::element::Type(::ngraph::element::Type_t::string);
case Precision::Q78:
case Precision::MIXED:
case Precision::CUSTOM:
@ -105,6 +107,8 @@ INFERENCE_ENGINE_1_0_DEPRECATED inline Precision convertPrecision(const ::ngraph
return Precision(Precision::BOOL);
case ::ngraph::element::Type_t::nf4:
return Precision(Precision::NF4);
case ::ngraph::element::Type_t::string:
return Precision(Precision::STRING);
case ::ngraph::element::Type_t::dynamic:
return Precision(Precision::UNSPECIFIED);
default:

View File

@ -872,6 +872,7 @@ extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long long>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<bool>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<char>);
extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<std::string>);
#endif
/**

View File

@ -55,6 +55,7 @@ public:
U64 = 73, /**< 64bit unsigned integer value */
BIN = 71, /**< 1bit integer value */
BOOL = 41, /**< 8bit bool type */
STRING = 79, /**< string type, std::string in C++ */
CUSTOM = 80 /**< custom precision has it's own name and size of elements */
};
@ -146,6 +147,7 @@ public:
CASE(BOOL, uint8_t);
CASE2(Q78, int16_t, uint16_t);
CASE2(BIN, int8_t, uint8_t);
CASE(STRING, std::string);
default:
return areSameStrings(name(), typeName == nullptr ? typeid(T).name() : typeName);
#undef CASE
@ -251,11 +253,11 @@ public:
static Precision FromStr(const std::string& str) {
static const std::unordered_map<std::string, ePrecision> names = {
#define PRECISION_NAME(s) {#s, s}
PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16), PRECISION_NAME(I4),
PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64),
PRECISION_NAME(U4), PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32),
PRECISION_NAME(U64), PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16),
PRECISION_NAME(MIXED), PRECISION_NAME(NF4), PRECISION_NAME(BIN),
PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16), PRECISION_NAME(I4),
PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64),
PRECISION_NAME(U4), PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32),
PRECISION_NAME(U64), PRECISION_NAME(FP32), PRECISION_NAME(FP64), PRECISION_NAME(FP16),
PRECISION_NAME(MIXED), PRECISION_NAME(NF4), PRECISION_NAME(STRING), PRECISION_NAME(BIN),
#undef PRECISION_NAME
};
auto i = names.find(str);
@ -364,6 +366,7 @@ protected:
CASE(MIXED);
CASE(BIN);
CASE(BOOL);
CASE(STRING);
default:
return makePrecisionInfo<UNSPECIFIED>("UNSPECIFIED");
#undef CASE
@ -472,6 +475,12 @@ struct INFERENCE_ENGINE_1_0_DEPRECATED PrecisionTrait<Precision::NF4> {
enum { is_float = false };
};
template <>
struct PrecisionTrait<Precision::STRING> {
using value_type = std::string;
enum { is_float = false };
};
template <class T>
INFERENCE_ENGINE_1_0_DEPRECATED inline uint8_t type_size_or_zero() {
return sizeof(T);

View File

@ -37,7 +37,9 @@ public:
void* data(const element::Type& element_type) const override {
if (element_type != element::undefined && element_type != element::dynamic &&
(element_type.bitwidth() != get_element_type().bitwidth() ||
element_type.is_real() != get_element_type().is_real())) {
element_type.is_real() != get_element_type().is_real() ||
(element_type == element::string && get_element_type() != element::string) ||
(element_type != element::string && get_element_type() == element::string))) {
OPENVINO_THROW("Tensor data with element type ",
get_element_type(),
", is not representable as pointer to ",
@ -178,28 +180,63 @@ public:
shape,
[&] {
OPENVINO_ASSERT(allocator, "Allocator was not initialized");
return const_cast<Allocator&>(allocator).allocate(element_type.size() * shape_size(shape));
auto num_elements = shape_size(shape);
auto data = const_cast<Allocator&>(allocator).allocate(element_type.size() * num_elements);
init(data, element_type, shape);
return data;
}()},
m_allocator{allocator} {}
~AllocatedTensor() {
auto num_elements = get_size();
destroy(0, num_elements);
m_allocator.deallocate(m_ptr, get_byte_size());
}
void set_shape(ov::Shape new_shape) override {
if (m_shape == new_shape)
return;
auto old_num_elements = get_size();
auto old_byte_size = get_byte_size();
m_shape = std::move(new_shape);
auto new_num_elements = get_size();
if (get_byte_size() > old_byte_size) {
// allocate buffer and initialize objects from scratch
destroy(0, old_num_elements);
m_allocator.deallocate(m_ptr, old_byte_size);
m_ptr = m_allocator.allocate(get_byte_size());
init(m_ptr, m_element_type, m_shape);
} else {
// destroy only not needed objects
destroy(new_num_elements, old_num_elements);
}
m_strides.clear();
update_strides();
}
private:
void destroy(size_t begin_ind, size_t end_ind) {
// it removes elements from tail
if (get_element_type() == element::Type_t::string) {
auto strings = static_cast<std::string*>(m_ptr);
for (size_t ind = begin_ind; ind < end_ind; ++ind) {
using std::string;
strings[ind].~string();
}
}
}
void init(void* data, const element::Type& element_type, const Shape& shape) {
if (element_type == element::Type_t::string) {
auto num_elements = shape_size(shape);
auto string_ptr = static_cast<std::string*>(data);
std::uninitialized_fill_n(string_ptr, num_elements, std::string());
}
}
Allocator m_allocator;
};

View File

@ -118,6 +118,7 @@ template class TBlob<unsigned long>;
template class TBlob<unsigned long long>;
template class TBlob<bool>;
template class TBlob<char>;
template class TBlob<std::string>;
#endif
} // namespace InferenceEngine

View File

@ -767,7 +767,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_concat_restrictions,
ConvConcatConcatNHWCRestrictionsPos::getTestCaseName);
const vector<SizeVector> ttc_input_shapes = {{64, 384}};
const vector<map<string, string>> ttc_configs = {
const vector<map<std::string, std::string>> ttc_configs = {
{{"GNA_DEVICE_MODE", "GNA_SW_FP32"}},
{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_EXEC_TARGET", "GNA_TARGET_2_0"}},
{{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_EXEC_TARGET", "GNA_TARGET_3_0"}},

View File

@ -27,8 +27,8 @@ using namespace ngraph::op;
using namespace ngraph::opset9;
using namespace std;
using DiagonalInsertionTestParams = tuple<map<string, string>, // Configuration
vector<vector<float>> // FakeQuantize min/max params
using DiagonalInsertionTestParams = tuple<map<std::string, std::string>, // Configuration
vector<vector<float>> // FakeQuantize min/max params
>;
constexpr uint16_t fq_levels = numeric_limits<uint16_t>::max();
@ -89,13 +89,13 @@ class DiagonalInsertionTest : public testing::WithParamInterface<DiagonalInserti
return std::make_shared<Reshape>(input_node, target_shape_const, false);
}
bool IsDebugEnabled(map<string, string>& configuration) {
bool IsDebugEnabled(map<std::string, std::string>& configuration) {
return configuration.find("LOG_LEVEL") != configuration.end() && configuration["LOG_LEVEL"] == "LOG_DEBUG";
}
public:
static string getTestCaseName(testing::TestParamInfo<DiagonalInsertionTestParams> obj) {
map<string, string> configuration;
static std::string getTestCaseName(testing::TestParamInfo<DiagonalInsertionTestParams> obj) {
map<std::string, std::string> configuration;
vector<vector<float>> fq_min_max;
tie(configuration, fq_min_max) = obj.param;
@ -156,7 +156,7 @@ TEST_P(DiagonalInsertionTest, CompareWithRefs) {
Run();
};
const vector<map<string, string>> configs = {
const vector<map<std::string, std::string>> configs = {
{
{"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
{"GNA_PRECISION", "I16"},

View File

@ -29,12 +29,12 @@ using namespace ov::op;
using namespace ov::opset12;
using namespace std;
using ConvLowPrecisionTestParams = tuple<InferenceEngine::Precision, // Network Precision
string, // Target Device
map<string, string>, // Configuration
Shape, // Input Shape
pair<float, float>, // FQ Min and Max (before conv)
std::size_t // Levels
using ConvLowPrecisionTestParams = tuple<InferenceEngine::Precision, // Network Precision
std::string, // Target Device
map<std::string, std::string>, // Configuration
Shape, // Input Shape
pair<float, float>, // FQ Min and Max (before conv)
std::size_t // Levels
>;
class ConvLowPrecisionTest : public testing::WithParamInterface<ConvLowPrecisionTestParams>,
@ -44,10 +44,10 @@ class ConvLowPrecisionTest : public testing::WithParamInterface<ConvLowPrecision
float inputDataResolution = 1.0f;
public:
static string getTestCaseName(testing::TestParamInfo<ConvLowPrecisionTestParams> obj) {
static std::string getTestCaseName(testing::TestParamInfo<ConvLowPrecisionTestParams> obj) {
InferenceEngine::Precision netPrecision;
string targetDevice;
map<string, string> configuration;
std::string targetDevice;
map<std::string, std::string> configuration;
Shape inputShape;
pair<float, float> fqMinMax;
std::size_t levels = 0;
@ -129,17 +129,17 @@ TEST_P(ConvLowPrecisionTest, CompareWithRefs) {
const vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP16,
InferenceEngine::Precision::FP32};
const vector<map<string, string>> configs_1 = {
const vector<map<std::string, std::string>> configs_1 = {
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I8"}, {"GNA_EXEC_TARGET", "GNA_TARGET_1_0"}},
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I16"}, {"GNA_EXEC_TARGET", "GNA_TARGET_1_0"}},
};
const vector<map<string, string>> configs_2 = {
const vector<map<std::string, std::string>> configs_2 = {
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I8"}, {"GNA_EXEC_TARGET", "GNA_TARGET_2_0"}},
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I16"}, {"GNA_EXEC_TARGET", "GNA_TARGET_2_0"}},
};
const vector<map<string, string>> configs_3 = {
const vector<map<std::string, std::string>> configs_3 = {
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I8"}, {"GNA_EXEC_TARGET", "GNA_TARGET_3_0"}},
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I16"}, {"GNA_EXEC_TARGET", "GNA_TARGET_3_0"}},
{{"GNA_DEVICE_MODE", "GNA_AUTO"}, {"GNA_PRECISION", "I8"}, {"GNA_EXEC_TARGET", "GNA_TARGET_3_5"}},

View File

@ -81,7 +81,7 @@ typedef tuple<size_t, // Concat axis
class InsertIdentityLayerConcatTest : public InsertIdentityLayerTest,
public ::testing::WithParamInterface<InsertIdentityConcatTestParams> {
public:
static string getTestCaseName(const testing::TestParamInfo<InsertIdentityConcatTestParams>& obj) {
static std::string getTestCaseName(const testing::TestParamInfo<InsertIdentityConcatTestParams>& obj) {
size_t axis, inputs_num;
tie(axis, inputs_num) = obj.param;
@ -198,7 +198,7 @@ typedef tuple<ELTWISE_TYPE, // eltwise type
class InsertIdentityLayerEltwiseTest : public InsertIdentityLayerTest,
public ::testing::WithParamInterface<InsertIdentityEltwiseTestParams> {
public:
static string getTestCaseName(const testing::TestParamInfo<InsertIdentityEltwiseTestParams>& obj) {
static std::string getTestCaseName(const testing::TestParamInfo<InsertIdentityEltwiseTestParams>& obj) {
ELTWISE_TYPE type;
bool low_precision, both_inputs_32bits;
tie(type, low_precision, both_inputs_32bits) = obj.param;
@ -472,7 +472,7 @@ typedef tuple<bool, // with pooling
class InsertIdentityLayerConvMatMulTest : public InsertIdentityLayerTest,
public ::testing::WithParamInterface<InsertIdentityConvTestParams> {
public:
static string getTestCaseName(const testing::TestParamInfo<InsertIdentityConvTestParams>& obj) {
static std::string getTestCaseName(const testing::TestParamInfo<InsertIdentityConvTestParams>& obj) {
bool with_pool, with_act, swap_matmul;
tie(with_pool, with_act, swap_matmul) = obj.param;
@ -613,7 +613,7 @@ TEST_F(InsertIdentityLayerResultTest, CompareWithRefs) {
}
class InsertIdentityForNonQuantizableConcatInputTest : public InsertIdentityLayerTest {
string getName() {
std::string getName() {
return "InsertIdentityForPrecAgnosticConcatInput";
}