[core] Remove tensor conversion utils from new API (#21396)

* Remove usage of util::wrap_tensor

* Remove tensor conversion utils
make it local in model to remove with legacy evaluate

* Make only output tensor dynamic if Shape{0}

* Evaluate fixes on HostTensor

---------

Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
This commit is contained in:
Pawel Raasz 2023-12-05 12:11:43 +01:00 committed by GitHub
parent 725aae4dbd
commit c18041ab53
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 106 additions and 231 deletions

View File

@ -9,24 +9,6 @@
namespace ov {
namespace util {
/**
* @brief Makes special version of 2D ov::Shape which is recognized as dynamic.
*
* This is a special case used for tensor <-> host tensor conversion to indicate that the tensor has a dynamic shape.
*
* @return 2-D shape with {0, SIZE_MAX}
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API Shape make_dynamic_shape();
/**
* @brief Check if Shape is marked as dynamic.
*
* @param s Shape for check.
* @return True if shape is dynamic otherwise false.
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API bool is_dynamic_shape(const Shape& s);
/**
* @brief Creates reduced shape from input by removing dimensions.

View File

@ -1,54 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/runtime/host_tensor.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ov {
namespace util {
/**
* @brief Wrap host tensor into ov::Tensor.
*
* @param t Input tensor for conversion.
* @return ov::Tensor which points to host tensor data. Can return a not-allocated or special dynamic tensor depending
* on the input tensor state.
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API Tensor wrap_tensor(const ngraph::HostTensorPtr& t);
/**
* @brief Wrap node output into ov::Tensor.
*
* @param output Node output to make tensor.
* @return ov::Tensor from output properties.
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API Tensor wrap_tensor(const Output<Node>& output);
/**
* @brief Make vector of wrapped tensors.
*
* @param tensors Input vector of host tensor to convert.
* @return ov::TensorVector, which can contain not-allocated or dynamic tensors depending on input tensor properties.
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API TensorVector wrap_tensors(const std::vector<ngraph::HostTensorPtr>& tensors);
/**
* @brief Update output host tensors if they got dynamic shape before evaluation (not allocated).
*
* Other tensors do not require an update as they are created from outputs and point to the same data blob.
*
* @param output_values Temporary ov::Tensor vector created from outputs for evaluation
* @param outputs Output host tensors vector to update.
*/
OPENVINO_DEPRECATED("This function is deprecated and will be removed soon.")
OPENVINO_API void update_output_host_tensors(const std::vector<ngraph::HostTensorPtr>& output_values,
const ov::TensorVector& outputs);
} // namespace util
} // namespace ov

View File

@ -12,14 +12,9 @@
namespace ov {
namespace reference {
void function(const std::shared_ptr<Model>& function, const ov::TensorVector& inputs, ov::TensorVector& outputs) {
const auto& results = function->get_results();
outputs.reserve(results.size());
for (size_t i = 0; i < results.size(); ++i) {
OPENVINO_SUPPRESS_DEPRECATED_START
auto shape = results[i]->get_output_partial_shape(0).is_static() ? results[i]->get_output_shape(0)
: ov::util::make_dynamic_shape();
OPENVINO_SUPPRESS_DEPRECATED_END
outputs.push_back(ov::Tensor(results[i]->get_element_type(), shape));
outputs.reserve(function->get_output_size());
for (const auto& result : function->get_results()) {
outputs.emplace_back(result->output(0));
}
function->evaluate(outputs, inputs);
}

View File

@ -10,7 +10,6 @@
#include "openvino/core/shape_util.hpp"
#include "openvino/op/util/symbolic_info.hpp"
#include "openvino/opsets/opset10.hpp"
#include "tensor_conversion_util.hpp"
#include "transformations/rt_info/decompression.hpp"
#include "transformations/rt_info/is_shape_subgraph.hpp"
@ -70,6 +69,10 @@ bool are_equal(const ov::Tensor& lhs, const ov::Tensor& rhs) {
return are_eq;
}
bool is_type_allocable(const element::Type& type) {
return type != element::undefined && type.is_static();
}
ov::Tensor evaluate_bound(const Output<Node>& output, bool is_upper, bool invalidate_all_unused_values = true) {
if (is_upper && output.get_tensor().get_upper_value()) {
return output.get_tensor().get_upper_value();
@ -84,9 +87,11 @@ ov::Tensor evaluate_bound(const Output<Node>& output, bool is_upper, bool invali
for (const auto& node : order) {
ov::TensorVector outputs;
for (const auto& out : node->outputs()) {
OPENVINO_SUPPRESS_DEPRECATED_START
outputs.push_back(util::wrap_tensor(out));
OPENVINO_SUPPRESS_DEPRECATED_END
if (is_type_allocable(out.get_element_type())) {
outputs.emplace_back(out);
} else {
outputs.emplace_back();
}
}
if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs)) {
@ -312,10 +317,13 @@ std::pair<ov::Tensor, ov::Tensor> ov::evaluate_both_bounds(const Output<Node>& o
for (const auto& node : order) {
ov::TensorVector outputs_lower, outputs_upper;
for (const auto& out : node->outputs()) {
OPENVINO_SUPPRESS_DEPRECATED_START
outputs_lower.push_back(util::wrap_tensor(out));
outputs_upper.push_back(util::wrap_tensor(out));
OPENVINO_SUPPRESS_DEPRECATED_END
if (is_type_allocable(out.get_element_type())) {
outputs_lower.emplace_back(out);
outputs_upper.emplace_back(out);
} else {
outputs_lower.emplace_back();
outputs_upper.emplace_back();
}
}
if (!node->evaluate_lower(outputs_lower) || !node->evaluate_upper(outputs_upper)) {
break;
@ -391,7 +399,7 @@ bool ov::interval_bound_evaluator(const Node* node,
node->evaluate(lower_output_values, *input_variants.begin());
auto zero = op::v0::Constant::create(element::i64, {1}, {0});
const auto zero_t = ov::Tensor(element::i64, Shape{1});
const auto zero_t = ov::Tensor(element::i64, Shape{});
*zero_t.data<int64_t>() = 0;
std::vector<TensorVector> unsqueezed_output_variants;
@ -602,9 +610,7 @@ bool ov::default_label_evaluator(const Node* node,
for (size_t i = 0; i < outputs_count; ++i) {
const auto& partial_shape = node->get_output_partial_shape(i);
// Set shape for static or special dynamic if partial shape is dynamic.
OPENVINO_SUPPRESS_DEPRECATED_START
auto shape = partial_shape.is_static() ? partial_shape.to_shape() : util::make_dynamic_shape();
OPENVINO_SUPPRESS_DEPRECATED_END
const auto& shape = partial_shape.is_static() ? partial_shape.to_shape() : Shape{0};
outputs.emplace_back(element::from<label_t>(), shape);
}

View File

@ -21,7 +21,6 @@
#include "openvino/op/util/variable_extension.hpp"
#include "openvino/pass/manager.hpp"
#include "shared_node_info.hpp"
#include "tensor_conversion_util.hpp"
#include "transformations/smart_reshape/smart_reshape.hpp"
using namespace std;
@ -487,24 +486,60 @@ int64_t ov::Model::get_result_index(const Output<const Node>& value) const {
return -1;
}
OPENVINO_SUPPRESS_DEPRECATED_START
namespace {
// Converts a legacy HostTensor into ov::Tensor (local copy kept here only for the
// legacy Model::evaluate overloads; removed from the public util API).
// - dynamic/undefined element type -> uninitialized (null) tensor
// - static shape                   -> tensor aliasing the host tensor's data
// - dynamic shape                  -> Shape{0} marker tensor (no data aliased)
ov::Tensor wrap_tensor(const ngraph::HostTensorPtr& t) {
const auto& et = t->get_element_type();
const auto& p_shape = t->get_partial_shape();
if (et.is_dynamic() || et == ov::element::undefined) {
return {};
} else if (p_shape.is_static()) {
return {et, p_shape.to_shape(), t->get_data_ptr()};
} else {
return {et, ov::Shape{0}};
}
}
// Wraps every host tensor in the vector; per-element rules as in wrap_tensor above.
ov::TensorVector wrap_tensors(const std::vector<ngraph::HostTensorPtr>& tensors) {
ov::TensorVector out;
out.reserve(tensors.size());
for (const auto& ht : tensors) {
out.push_back(wrap_tensor(ht));
}
return out;
}
// Copies element type, shape and data back into host tensors that were dynamically
// shaped before evaluation. Statically shaped host tensors are skipped: their
// wrapped ov::Tensor already aliases the same data pointer, so no copy is needed.
void update_output_host_tensors(const std::vector<ngraph::HostTensorPtr>& output_values,
const ov::TensorVector& outputs) {
OPENVINO_ASSERT(output_values.size() == outputs.size());
for (size_t i = 0; i < output_values.size(); ++i) {
auto& ht = output_values[i];
auto& t = outputs[i];
if (ht->get_partial_shape().is_dynamic()) {
ht->set_element_type(t.get_element_type());
ht->set_shape(t.get_shape());
std::memcpy(ht->get_data_ptr(), t.data(), t.get_byte_size());
}
}
}
} // namespace
bool ov::Model::evaluate(const HostTensorVector& output_tensors, const HostTensorVector& input_tensors) const {
ov::EvaluationContext evaluation_context;
OPENVINO_SUPPRESS_DEPRECATED_START
return evaluate(output_tensors, input_tensors, evaluation_context);
OPENVINO_SUPPRESS_DEPRECATED_END
}
bool ov::Model::evaluate(const HostTensorVector& output_tensors,
const HostTensorVector& input_tensors,
EvaluationContext& evaluation_context) const {
OPENVINO_SUPPRESS_DEPRECATED_START
auto outputs = ov::util::wrap_tensors(output_tensors);
auto inputs = ov::util::wrap_tensors(input_tensors);
auto outputs = wrap_tensors(output_tensors);
auto inputs = wrap_tensors(input_tensors);
bool sts = evaluate(outputs, inputs, evaluation_context);
ov::util::update_output_host_tensors(output_tensors, outputs);
OPENVINO_SUPPRESS_DEPRECATED_END
update_output_host_tensors(output_tensors, outputs);
return sts;
}
OPENVINO_SUPPRESS_DEPRECATED_END
bool ov::Model::evaluate(ov::TensorVector& output_tensors, const ov::TensorVector& input_tensors) const {
ov::EvaluationContext evaluation_context;
@ -550,7 +585,7 @@ bool ov::Model::evaluate(ov::TensorVector& output_tensors,
for (const auto& v : node->outputs()) {
auto it = output_tensor_map.find(v);
if (it == output_tensor_map.end()) {
output_tensors.push_back(util::wrap_tensor(v));
output_tensors.emplace_back(v);
} else {
output_tensors.push_back(it->second);
}

View File

@ -19,7 +19,6 @@
#include "openvino/pass/pattern/matcher.hpp"
#include "shape_validation.hpp"
#include "shared_node_info.hpp"
#include "tensor_conversion_util.hpp"
using namespace std;
@ -720,18 +719,27 @@ protected:
inline ngraph::HostTensorPtr make_tmp_host_tensor(const ov::Tensor& t) {
if (!t) {
return std::make_shared<DynamicTensor>(ov::element::dynamic);
} else if (ov::util::is_dynamic_shape(t.get_shape())) {
} else {
return std::make_shared<ngraph::runtime::HostTensor>(t.get_element_type(), t.get_shape(), t.data());
}
}
// Converts an output ov::Tensor into a temporary HostTensor for the legacy
// evaluate() API. An empty (null) tensor maps to a fully dynamic DynamicTensor;
// the Shape{0} marker maps to a DynamicTensor with a known element type;
// otherwise the HostTensor aliases the tensor's existing data.
// NOTE(review): a genuinely empty 1-D tensor (Shape{0}) is also treated as
// dynamic here — confirm callers never pass a real zero-sized output.
inline ngraph::HostTensorPtr make_tmp_out_host_tensor(const ov::Tensor& t) {
if (!t) {
return std::make_shared<DynamicTensor>(ov::element::dynamic);
} else if (t.get_shape() == ov::Shape{0}) {
return std::make_shared<DynamicTensor>(t.get_element_type());
} else {
return std::make_shared<ngraph::runtime::HostTensor>(t.get_element_type(), t.get_shape(), t.data());
}
}
inline ngraph::HostTensorVector create_tmp_tensors(const ov::TensorVector& tensors) {
inline ngraph::HostTensorVector create_tmp_tensors(const ov::TensorVector& tensors, const bool is_output) {
const auto make_tmp_ht = is_output ? make_tmp_out_host_tensor : make_tmp_host_tensor;
ngraph::HostTensorVector result;
result.reserve(tensors.size());
for (const auto& tensor : tensors) {
result.push_back(make_tmp_host_tensor(tensor));
result.push_back(make_tmp_ht(tensor));
}
return result;
}
@ -759,8 +767,8 @@ inline void update_output_tensors(ov::TensorVector& output_values, const ngraph:
} // namespace
bool ov::Node::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const {
HostTensorVector output = create_tmp_tensors(output_values);
HostTensorVector input = create_tmp_tensors(input_values);
HostTensorVector output = create_tmp_tensors(output_values, true);
HostTensorVector input = create_tmp_tensors(input_values, false);
bool sts = evaluate(output, input);
if (sts)
update_output_tensors(output_values, output);
@ -771,8 +779,8 @@ bool ov::Node::evaluate(ov::TensorVector& output_values,
const ov::TensorVector& input_values,
const ov::EvaluationContext& evaluationContext) const {
// Call evaluate for old implementation with EvaluationContext
HostTensorVector output = create_tmp_tensors(output_values);
HostTensorVector input = create_tmp_tensors(input_values);
HostTensorVector output = create_tmp_tensors(output_values, true);
HostTensorVector input = create_tmp_tensors(input_values, false);
bool sts = evaluate(output, input, evaluationContext);
if (sts)
update_output_tensors(output_values, output);
@ -826,9 +834,13 @@ bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& in
}
TensorVector output_tensors;
OPENVINO_SUPPRESS_DEPRECATED_START
for (const auto& output : outputs()) {
output_tensors.push_back(ov::util::wrap_tensor(output));
const auto& et = output.get_element_type();
if (et != element::undefined && et.is_static()) {
output_tensors.emplace_back(output);
} else {
output_tensors.emplace_back();
}
}
if (evaluate(output_tensors, input_tensors)) {
@ -838,7 +850,6 @@ bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& in
}
return true;
}
OPENVINO_SUPPRESS_DEPRECATED_END
return false;
}

View File

@ -120,6 +120,15 @@ bool op::v1::Select::evaluate(const HostTensorVector& output_values, const HostT
OPENVINO_ASSERT(validate_host_tensor_vector(output_values, 1));
OPENVINO_SUPPRESS_DEPRECATED_END
const auto autob = get_auto_broadcast();
auto out_shape = shape_infer(this,
std::vector<PartialShape>{input_values[0]->get_partial_shape(),
input_values[1]->get_partial_shape(),
input_values[2]->get_partial_shape()})[0]
.to_shape();
output_values[0]->set_shape(out_shape);
return detail::evaluate_select(output_values, input_values, autob, output_values[0]->get_element_type());
}

View File

@ -35,13 +35,7 @@ public:
HostTensorWrapper(const ngraph::HostTensorPtr& tensor) : tensor{tensor}, m_type(tensor->get_element_type()) {
const auto& p_shape = tensor->get_partial_shape();
if (p_shape.is_static()) {
m_shape = p_shape.to_shape();
} else {
OPENVINO_SUPPRESS_DEPRECATED_START
m_shape = ov::util::make_dynamic_shape();
OPENVINO_SUPPRESS_DEPRECATED_END
}
m_shape = p_shape.is_static() ? p_shape.to_shape() : ov::Shape{0};
update_strides();
}

View File

@ -65,10 +65,10 @@ void ITensor::copy_to(const std::shared_ptr<ov::ITensor>& dst) const {
" != dst: ",
dst->get_element_type(),
")");
OPENVINO_SUPPRESS_DEPRECATED_START
if (dst->get_shape() == ov::Shape{0} || ov::util::is_dynamic_shape(dst->get_shape()))
if (dst->get_shape() == ov::Shape{0})
dst->set_shape(get_shape());
OPENVINO_SUPPRESS_DEPRECATED_END
OPENVINO_ASSERT(shapes_equal(get_shape(), dst->get_shape()),
"Tensor shapes are not equal. (src: ",
get_shape(),

View File

@ -103,16 +103,6 @@ TContainer replace_container(const TContainer& input, const TAxes& axes) {
}
namespace util {
// Returns the sentinel 2-D shape {0, SIZE_MAX} that marks a Shape as "dynamic"
// for tensor <-> host tensor conversion (deprecated; removed by this commit).
Shape make_dynamic_shape() {
return Shape{0, std::numeric_limits<size_t>::max()};
}
// Checks whether a shape equals the dynamic-shape sentinel above.
bool is_dynamic_shape(const Shape& s) {
OPENVINO_SUPPRESS_DEPRECATED_START
// Suppression needed because make_dynamic_shape() is itself deprecated.
static const auto dyn_shape = make_dynamic_shape();
OPENVINO_SUPPRESS_DEPRECATED_END
return s == dyn_shape;
}
Shape reduce(const Shape& input, const AxisSet& axes) {
return ov::reduce_container(input, axes);

View File

@ -1,62 +0,0 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "tensor_conversion_util.hpp"
#include "openvino/core/shape_util.hpp"
namespace ov {
namespace util {
OPENVINO_SUPPRESS_DEPRECATED_START
// Wraps a legacy HostTensor into ov::Tensor.
// - dynamic/undefined element type -> uninitialized (null) tensor
// - static shape                   -> tensor aliasing the host tensor's data
// - dynamic shape                  -> sentinel tensor with make_dynamic_shape() dims
Tensor wrap_tensor(const ngraph::HostTensorPtr& t) {
const auto& et = t->get_element_type();
const auto& p_shape = t->get_partial_shape();
if (et.is_dynamic() || et == element::undefined) {
return {};
} else if (p_shape.is_static()) {
return {et, p_shape.to_shape(), t->get_data_ptr()};
} else {
return {et, make_dynamic_shape()};
}
}
// Makes an ov::Tensor from a node output's type/shape properties.
// Unlike the HostTensor overload, no data pointer is passed: a tensor with a
// static shape is freshly allocated rather than aliasing existing memory.
Tensor wrap_tensor(const Output<Node>& output) {
const auto& et = output.get_element_type();
const auto& p_shape = output.get_partial_shape();
if (et.is_dynamic() || et == element::undefined) {
return {};
} else if (p_shape.is_static()) {
return {et, p_shape.to_shape()};
} else {
return {et, make_dynamic_shape()};
}
}
// Wraps every host tensor in the vector; per-element rules as in wrap_tensor.
ov::TensorVector wrap_tensors(const std::vector<ngraph::HostTensorPtr>& tensors) {
ov::TensorVector out;
out.reserve(tensors.size());
for (const auto& ht : tensors) {
out.push_back(ov::util::wrap_tensor(ht));
}
return out;
}
// Copies element type, shape and data back into host tensors that were
// dynamically shaped before evaluation. Statically shaped host tensors are
// skipped: their wrapped ov::Tensor aliases the same data pointer, so the
// evaluated result is already visible through them.
void update_output_host_tensors(const std::vector<ngraph::HostTensorPtr>& output_values,
const ov::TensorVector& outputs) {
OPENVINO_ASSERT(output_values.size() == outputs.size());
for (size_t i = 0; i < output_values.size(); ++i) {
auto& ht = output_values[i];
auto& t = outputs[i];
if (ht->get_partial_shape().is_dynamic()) {
ht->set_element_type(t.get_element_type());
ht->set_shape(t.get_shape());
std::memcpy(ht->get_data_ptr(), t.data(), t.get_byte_size());
}
}
}
OPENVINO_SUPPRESS_DEPRECATED_END
} // namespace util
} // namespace ov

View File

@ -14,7 +14,6 @@
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "tensor_conversion_util.hpp"
using namespace std;
using namespace ov;
@ -35,24 +34,6 @@ TEST(tensor, tensor_names) {
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(), relu->get_output_tensor(0).get_names());
}
TEST(tensor, wrap_tensor_with_unspecified_type) {
auto param = std::make_shared<ov::op::v0::Parameter>(element::undefined, ov::PartialShape{});
OPENVINO_SUPPRESS_DEPRECATED_START
auto tensor = ov::util::wrap_tensor(param->output(0));
OPENVINO_SUPPRESS_DEPRECATED_END
// !tensor means that the tensor is not initialized
EXPECT_EQ(!tensor, true);
}
TEST(tensor, wrap_tensor_with_unspecified_type_from_host_tensor) {
OPENVINO_SUPPRESS_DEPRECATED_START
auto host_tensor = std::make_shared<ngraph::runtime::HostTensor>(element::undefined, ov::PartialShape{});
auto tensor = ov::util::wrap_tensor(host_tensor);
OPENVINO_SUPPRESS_DEPRECATED_END
// !tensor means that the tensor is not initialized
EXPECT_EQ(!tensor, true);
}
TEST(tensor, create_tensor_with_zero_dims_check_stride) {
ov::Shape shape = {0, 0, 0, 0};
auto tensor = ov::Tensor(element::f32, shape);

View File

@ -72,7 +72,7 @@ void Reference::executeDynamicImpl(dnnl::stream strm) {
if (mem_desc->isDefined()) {
outputs.emplace_back(ovCoreNode->get_output_element_type(i), mem_desc->getShape().getStaticDims());
} else {
outputs.emplace_back(ovCoreNode->get_output_element_type(i), ov::util::make_dynamic_shape());
outputs.emplace_back(ovCoreNode->get_output_element_type(i), ov::Shape{0});
}
}
} else {

View File

@ -123,19 +123,13 @@ bool ov::runtime::interpreter::INTExecutable::call(std::vector<ov::Tensor>& outp
std::vector<ov::Tensor> op_outputs;
for (size_t i = 0; i < op->get_output_size(); ++i) {
auto tensor = op->output(i).get_tensor_ptr();
ov::Tensor host_tensor;
auto it = tensor_map.find(tensor);
auto output = op->output(i);
if (op::util::is_output(op) || it == tensor_map.end() || !it->second) {
OPENVINO_SUPPRESS_DEPRECATED_START
host_tensor = ov::Tensor(
output.get_element_type(),
output.get_partial_shape().is_dynamic() ? ov::util::make_dynamic_shape() : output.get_shape());
OPENVINO_SUPPRESS_DEPRECATED_END
op_outputs.emplace_back(output);
} else {
host_tensor = it->second;
op_outputs.push_back(it->second);
}
op_outputs.push_back(host_tensor);
}
{

View File

@ -103,14 +103,9 @@ void function(const std::shared_ptr<ov::Model>& function, const ov::TensorVector
" bytes");
}
const auto& results = function->get_results();
outputs.reserve(results.size());
for (size_t i = 0; i < results.size(); ++i) {
OPENVINO_SUPPRESS_DEPRECATED_START
ov::Shape res_shape = results[i]->get_output_partial_shape(0).is_static() ? results[i]->get_output_shape(0)
: ov::util::make_dynamic_shape();
OPENVINO_SUPPRESS_DEPRECATED_END
outputs.push_back(ov::Tensor(results[i]->get_element_type(), res_shape));
outputs.reserve(function->get_output_size());
for (const auto& result : function->get_results()) {
outputs.emplace_back(result->output(0));
}
call(outputs, inputs, function);
}

View File

@ -6,7 +6,6 @@
#include "backend.hpp"
#include "evaluate_node.hpp"
#include "tensor_conversion_util.hpp"
namespace ti_v0 {
ov::reference::custom_evaluate_function evaluate =