Use ov tensor in validation utils for bounds evaluation (#15252)

* Use ov::Tensor in are_equal

* Use ov::Tensor in bounds evaluation
- remove from node evaluates bounds for HostTensor
- update validation utils
- update operators

* Fix code style

* Fix windows compilation errors

* Fix windows compilation errors in scatter update

* Improve reshape output shape calculation
during shape infer

* Add common bound evaluator
- rename evaluate_bound_t -> evaluate_bound

* Fix code formatting

* Move bound evaluation util functions to dev API

* Fix compilation issues

* Remove from dev API not required bound eval
- extract bound eval functions to separate source

* Add missing doxygen comment

* Use reference evaluate in bounds calculation for shape_of,
because the input data can have a dynamic type, in which case
creating a tensor is not possible; the data type has no
meaning for shape calculation anyway.
This commit is contained in:
This commit is contained in:
Pawel Raasz 2023-01-31 19:18:28 +01:00 committed by GitHub
parent d57862edee
commit 0da339a7f2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
66 changed files with 938 additions and 800 deletions

View File

@ -7,10 +7,10 @@
#include <memory>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
#include <ngraph/validation_util.hpp>
#include <openvino/opsets/opset8.hpp>
#include <vector>
#include "bound_evaluation_util.hpp"
#include "itt.hpp"
#include "transformations/utils/utils.hpp"
@ -19,10 +19,11 @@ bool has_valid_pattern(const ov::Output<ov::Node>& node_out) {
const auto const_node = std::dynamic_pointer_cast<ov::opset8::Constant>(node_out.get_node_shared_ptr());
if (!const_node) {
// Lower bound of the value
auto lb = ngraph::evaluate_lower_bound(node_out);
auto lb = ov::evaluate_lower_bound(node_out);
if (!lb)
return false;
const auto lb_const_node = std::make_shared<ov::opset8::Constant>(lb);
const auto lb_const_node =
std::make_shared<ov::opset8::Constant>(lb.get_element_type(), lb.get_shape(), lb.data());
const auto& lb_values = lb_const_node->cast_vector<int64_t>();
// The pattern is valid if all lower bound values are higher than zero (not a special number)
@ -34,11 +35,12 @@ bool has_valid_pattern(const ov::Output<ov::Node>& node_out) {
return true;
// Upper bound of the value
auto ub = ngraph::evaluate_upper_bound(node_out);
auto ub = ov::evaluate_upper_bound(node_out);
if (!ub)
return false;
const auto ub_const_node = std::make_shared<ov::opset8::Constant>(ub);
const auto ub_const_node =
std::make_shared<ov::opset8::Constant>(ub.get_element_type(), ub.get_shape(), ub.data());
const auto& ub_values = ub_const_node->cast_vector<int64_t>();
if (lb_values.size() != ub_values.size())
return false;

View File

@ -4,13 +4,14 @@
#include "transformations/low_precision/mark_dequantization_subgraph.hpp"
#include <ngraph/validation_util.hpp>
#include <openvino/opsets/opset10.hpp>
#include <openvino/pass/pattern/op/or.hpp>
#include <openvino/pass/pattern/op/wrap_type.hpp>
#include <transformations/rt_info/dequantization_node.hpp>
#include <transformations/rt_info/disable_constant_folding.hpp>
#include "bound_evaluation_util.hpp"
using namespace ngraph;
ov::pass::MarkDequantizationSubgraph::MarkDequantizationSubgraph(const element::TypeVector& precisions) {
@ -57,7 +58,7 @@ ov::pass::MarkDequantizationSubgraph::MarkDequantizationSubgraph(const element::
}
std::vector<Node*> tmp;
if (ngraph::could_propagate(input, tmp)) {
if (ov::could_propagate(input, tmp)) {
// disable ConstantFolding if dequantization subgraph is on constant data
ov::disable_constant_folding(convert);
}

View File

@ -0,0 +1,34 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/node.hpp"
namespace ov {
/// \brief Checks if bounds can be propagated on Node output.
/// \param output Node output to test for bound propagation.
/// \param order return vector of nodes for output which can be processed for bound evaluation.
/// \return True if bounds can be propagated for output and order vector has valid data, otherwise false.
OPENVINO_API bool could_propagate(const Output<Node>& output, std::vector<Node*>& order);
/// \brief Evaluates lower value estimation of the output tensor. Traverses graph up to deduce
/// estimation through it.
/// \param output Node output pointing to the tensor for estimation.
/// \return Tensor with the estimated lower-bound value.
OPENVINO_API Tensor evaluate_lower_bound(const Output<Node>& output);
/// \brief Evaluates upper value estimation of the output tensor. Traverses graph up to deduce
/// estimation through it.
/// \param output Node output pointing to the tensor for estimation.
/// \return Tensor with the estimated upper-bound value.
OPENVINO_API Tensor evaluate_upper_bound(const Output<Node>& output);
/// \brief Evaluates lower and upper value estimations of the output tensor. Traverses graph up
/// to deduce estimation through it.
/// \param output Node output pointing to the tensor for estimation.
/// \return pair with Tensors for lower and upper value estimation.
OPENVINO_API std::pair<Tensor, Tensor> evaluate_both_bounds(const Output<Node>& output);
} // namespace ov

View File

@ -16,6 +16,7 @@
namespace ngraph {
using ov::evaluate_as_partial_shape;
using ov::get_constant_from_source;
using ov::has_no_labels;
using ov::infer_auto_padding;
using ov::infer_convolution_forward;
using ov::normalize_axes;
@ -159,57 +160,6 @@ NGRAPH_API void evaluate_nodes(std::map<RawNodeOutput, HostTensorPtr>& value_map
const OutputVector& outputs,
const EvaluationContext& evaluation_context = EvaluationContext());
/// \brief Evaluates lower value estimation of the output tensor. Traverses graph up to deduce
/// estimation through it.
/// \param Node output pointing to the tensor for estimation.
/// \return HostTensorPtr to estimated value if can be determined, or nullptr.
NGRAPH_API HostTensorPtr evaluate_lower_bound(const Output<Node>& output);
/// \brief Evaluates lower value estimation of the output tensor. Traverses graph up to deduce
/// estimation through it.
/// \param output Tensor to be estimated.
/// \return HostTensorPtr to estimated value if can be determined, or nullptr.
NGRAPH_API HostTensorPtr evaluate_upper_bound(const Output<Node>& output);
/// \brief Evaluates lower and upper value estimations of the output tensor. Traverses graph up
/// to deduce estimation through it.
/// \param output Node output pointing to the tensor for estimation.
/// \return pair with HostTensorPtrs for lower and upper value estimation. Each object in pair
/// could be HostTensorPtr to estimated value if particular bound can be determined, or nullptr.
NGRAPH_API std::pair<HostTensorPtr, HostTensorPtr> evaluate_both_bounds(const Output<Node>& output);
/// \brief Estimates upper bound for node output tensors using only upper bounds of the nodes
/// inputs.
/// \param node Operation to be performed
/// \param output_values Vector of HostTensorPtrs representing resulting upper value estimations
/// \return boolean status if value evaluation was successful.
NGRAPH_API bool default_upper_bound_evaluator(const Node* node, const HostTensorVector& output_values);
/// \brief Estimates lower bound for node output tensors using only lower bounds of the nodes
/// inputs.
/// \param node Operation to be performed
/// \param output_values Vector of HostTensorPtrs representing resulting lower value estimations
/// \return boolean status if value evaluation was successful.
NGRAPH_API bool default_lower_bound_evaluator(const Node* node, const HostTensorVector& output_values);
/// \brief Estimates both bounds for node output tensors using both bounds of inputs. Works for
/// operations with two inputs (in_1 and in_2). Brute forces all the pairs of bounds for inputs
/// and evaluates all of them: {in_1_lower, in_2 lower}, {in_1_lower, in_2 upper}, {in_1_upper,
/// in_2_lower}, {in_1_upper, in_2_upper}. Lower and upper values are selected from all the
/// outputs calculated using input pairs.
/// \param node Operation to be performed
/// \param output_values Vector of HostTensorPtrs representing resulting lower value estimations
/// \return boolean status if value evaluation was successful.
NGRAPH_API bool interval_bound_evaluator(const Node* node,
const HostTensorVector& lower_output_values,
const HostTensorVector& upper_output_values);
/// \brief Checks if all the elements of the bound HostTensor are positive
NGRAPH_API bool host_tensor_is_positive(const HostTensorPtr& bound);
/// \brief Checks if lower and upper bounds of the corresponding tensor are set (not nullptr)
/// and pointers are the same. It doesn't check if lower and upper values are the same relying
/// only on pointers comparison.
NGRAPH_API bool has_and_set_equal_bounds(const Output<Node>& source);
/// \brief Returns a Constant storing scalar value equal to std::numeric_limits<t>::max()
NGRAPH_API std::shared_ptr<op::Constant> get_constant_max_of_type(element::Type_t t);
@ -223,8 +173,6 @@ NGRAPH_API std::shared_ptr<op::Constant> get_constant_lowest_of_type(element::Ty
/// that all the HostTensorPtrs are not equal to nullptr
NGRAPH_API bool validate_host_tensor_vector(const HostTensorVector& v, const size_t& size);
NGRAPH_API bool could_propagate(const Output<Node>& output, std::vector<Node*>& order);
namespace opset1 {
///
/// \brief Calculates padding values for ConvolutionBackpropData operator.

View File

@ -16,6 +16,7 @@
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/tensor.hpp"
namespace ngraph {
namespace runtime {
@ -74,9 +75,9 @@ public:
void set_partial_shape(const PartialShape& partial_shape);
/// \brief sets lower bound value description
void set_lower_value(const ngraph::HostTensorPtr& value);
void set_lower_value(const ov::Tensor& value);
/// \brief sets upper bound value description
void set_upper_value(const ngraph::HostTensorPtr& value);
void set_upper_value(const ov::Tensor& value);
/// \brief sets value label description
void set_value_label(const TensorLabel& value_label);
/// \brief unsets bound value descriptions
@ -90,11 +91,11 @@ public:
return m_partial_shape;
}
/// \brief gets lower bound value description
ngraph::HostTensorPtr get_lower_value() const {
const ov::Tensor& get_lower_value() const {
return m_lower_value;
}
/// \brief gets upper bound value description
ngraph::HostTensorPtr get_upper_value() const {
const ov::Tensor& get_upper_value() const {
return m_upper_value;
}
/// \brief gets upper bound value description
@ -103,7 +104,7 @@ public:
}
/// \brief checks if lower and upper bounds are set and refer to the same underlying data
bool has_and_set_bound() const {
return m_upper_value != nullptr && m_upper_value == m_lower_value;
return m_upper_value && m_lower_value && m_upper_value.data() == m_lower_value.data();
}
size_t size() const;
@ -133,7 +134,7 @@ protected:
// TODO: end
PartialShape m_partial_shape;
ngraph::HostTensorPtr m_lower_value, m_upper_value;
ov::Tensor m_lower_value, m_upper_value;
TensorLabel m_value_label;
std::string m_legacy_name;

View File

@ -221,12 +221,6 @@ public:
virtual bool evaluate(const ov::HostTensorVector& output_values,
const ov::HostTensorVector& input_values,
const EvaluationContext& evaluationContext) const;
OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use evaluate_lower with "
"ov::Tensor instead.")
virtual bool evaluate_lower(const ov::HostTensorVector& output_values) const;
OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use evaluate_upper with "
"ov::Tensor instead.")
virtual bool evaluate_upper(const ov::HostTensorVector& output_values) const;
/// \brief Evaluates the op on input_values putting results in output_values
/// \param output_values Tensors for the outputs to compute. One for each result

View File

@ -196,8 +196,7 @@ OPENVINO_API bool are_unique(const std::vector<int64_t>& data);
/// \param max Maximum value bound
///
/// \return Value if between min, max otherwise min or max.
OPENVINO_API
int64_t clip(const int64_t& value, const int64_t& min, const int64_t& max);
OPENVINO_API int64_t clip(const int64_t& value, const int64_t& min, const int64_t& max);
/// \brief Constant folds a subgraph to a constant node
///

View File

@ -43,6 +43,7 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool has_evaluate() const override;
};
} // namespace v1

View File

@ -47,9 +47,9 @@ public:
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool has_evaluate() const override;
private:

View File

@ -54,10 +54,9 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
bool evaluate_lower(TensorVector& output_values) const override;
bool evaluate_upper(TensorVector& output_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
protected:

View File

@ -181,10 +181,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
// Don't constant fold a constant; it would make a copy
bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override {

View File

@ -44,10 +44,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
protected:

View File

@ -49,10 +49,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
protected:
bool m_pythondiv{true};

View File

@ -30,10 +30,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
};
} // namespace v1
} // namespace op

View File

@ -36,10 +36,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
};
} // namespace v1
} // namespace op

View File

@ -50,10 +50,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;

View File

@ -34,9 +34,9 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool has_evaluate() const override;

View File

@ -36,10 +36,9 @@ public:
bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& output_values, const TensorVector& input_values) const override;
bool evaluate_lower(TensorVector& output_values) const override;
bool evaluate_upper(TensorVector& output_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
@ -67,10 +66,9 @@ public:
bool evaluate(const HostTensorVector& output_values, const HostTensorVector& input_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate(TensorVector& output_values, const TensorVector& input_values) const override;
bool evaluate_lower(TensorVector& output_values) const override;
bool evaluate_upper(TensorVector& output_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
};

View File

@ -48,10 +48,8 @@ public:
// TODO: Update to use new evaluate with TensorVector
bool evaluate(const HostTensorVector&, const HostTensorVector&) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
std::shared_ptr<v0::Constant> get_default_const_axes(const Output<Node>& start) const;

View File

@ -41,9 +41,9 @@ public:
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool has_evaluate() const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;

View File

@ -26,10 +26,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;

View File

@ -113,9 +113,9 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
private:

View File

@ -30,9 +30,9 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool has_evaluate() const override;
bool evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;

View File

@ -34,10 +34,10 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
bool evaluate_lower(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_upper(TensorVector& output_values) const override;
bool evaluate_lower(TensorVector& output_values) const override;
bool has_evaluate() const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;

View File

@ -27,10 +27,8 @@ public:
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool has_evaluate() const override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& output_values) const override;
bool evaluate_upper(TensorVector& output_values) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;

View File

@ -58,10 +58,8 @@ public:
m_autob = autob;
}
bool visit_attributes(AttributeVisitor& visitor) override;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
private:
AutoBroadcastSpec m_autob;

View File

@ -63,10 +63,8 @@ protected:
bool evaluate_broadcast(const HostTensorPtr& arg0, const HostTensorPtr& out, const AxisSet& broadcast_axes) const;
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
PartialShape get_result_shape_pdpd(const PartialShape& arg0_shape,
const PartialShape& target_shape,

View File

@ -30,9 +30,9 @@ public:
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;

View File

@ -38,10 +38,10 @@ public:
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_lower(TensorVector& outputs) const override;
bool evaluate_upper(TensorVector& outputs) const override;
bool has_evaluate() const override;
bool evaluate_label(TensorLabelVector& output_labels) const override;

View File

@ -237,11 +237,15 @@ std::unique_ptr<TResult> get_input_bounds(const ov::Node* op,
const auto& et = get_input_const_element_type(op, idx, constant_data);
out.reset(new TResult(make_bounds_vec(et, *lowers, *lowers)));
} else {
auto bounds = ngraph::evaluate_both_bounds(op->get_input_source_output(idx));
if (bounds.first && bounds.second) {
ov::Tensor lb, ub;
std::tie(lb, ub) = ov::evaluate_both_bounds(op->get_input_source_output(idx));
if (lb && ub) {
const auto& et = op->get_input_element_type(idx);
auto lowers = std::make_shared<op::v0::Constant>(bounds.first)->cast_vector<int64_t>();
auto uppers = std::make_shared<op::v0::Constant>(bounds.second)->cast_vector<int64_t>();
auto lowers = std::make_shared<op::v0::Constant>(lb.get_element_type(), lb.get_shape(), lb.data())
->cast_vector<int64_t>();
auto uppers = std::make_shared<op::v0::Constant>(ub.get_element_type(), ub.get_shape(), ub.data())
->cast_vector<int64_t>();
out.reset(new TResult(make_bounds_vec(et, lowers, uppers)));
}
}

View File

@ -8,6 +8,7 @@
#include <openvino/opsets/opset1.hpp>
#include <type_traits>
#include "bound_evaluation_util.hpp"
#include "shape_infer_type_utils.hpp"
template <class OpType, class T>

View File

@ -0,0 +1,404 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "bound_evaluate.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/core/rt_info.hpp"
#include "openvino/opsets/opset10.hpp"
namespace {
using namespace ov;
// Copies `node`'s runtime info to the consumers of its outputs once every
// output has constant bounds (lower == upper, see has_and_set_bound()).
// Consumers of `final_port` are excluded via `stop_nodes` — that is the output
// currently being bound-evaluated, handled by the caller.
// \param node       Node whose rt_info may be propagated.
// \param final_port Output whose target inputs must NOT receive the rt_info.
void propagate_rt_info(Node* node, const Output<Node>& final_port) {
auto node_outputs = node->outputs();
// Propagate only when every output already carries equal lower/upper bounds.
bool same_outputs = std::all_of(node_outputs.begin(), node_outputs.end(), [](const Output<Node>& output) {
return output.get_tensor().has_and_set_bound();
});
if (same_outputs && op::util::is_constant(node))  // constant should not propagate its rt_info
{
// Collect consumers of final_port so they are skipped below.
std::unordered_set<Node*> stop_nodes;
for (const auto& in : final_port.get_target_inputs())
stop_nodes.insert(in.get_node());
auto curr_node = node->shared_from_this();
for (const auto& output : node_outputs) {
if (output == final_port)
continue;
for (auto& in : output.get_target_inputs()) {
if (stop_nodes.count(in.get_node()))
continue;
try {
auto consumer = in.get_node()->shared_from_this();
// Merge rt_info of this node and the consumer onto the consumer.
copy_runtime_info({curr_node, consumer}, consumer);
} catch (const std::bad_weak_ptr&) {
// Exception can be thrown, if `shared_from_this()` was called during node creation.
// Continue propagation for other nodes.
}
}
}
}
}
// Returns true when both tensors are valid, have identical element type and
// shape, and alias the very same memory (equal data pointers). This is an
// identity check, not an element-wise comparison.
bool are_same_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
    if (!lhs || !rhs) {
        return false;
    }
    const bool same_meta =
        (lhs.get_element_type() == rhs.get_element_type()) && (lhs.get_shape() == rhs.get_shape());
    return same_meta && (lhs.data() == rhs.data());
}
// Element-wise equality of two tensors. Tensors compare equal only when both
// are valid, share element type and shape, contain at most `element_limit`
// elements (larger tensors are deliberately treated as unequal to keep the
// check cheap), and their raw bytes match.
bool are_equal(const ov::Tensor& lhs, const ov::Tensor& rhs, size_t element_limit = 10) {
    if (!lhs || !rhs) {
        return false;
    }

    const auto& l_shape = lhs.get_shape();
    const bool comparable = (lhs.get_element_type() == rhs.get_element_type()) && (l_shape == rhs.get_shape()) &&
                            (shape_size(l_shape) <= element_limit);
    // Byte-wise comparison is valid because type and shape already match.
    return comparable && (memcmp(lhs.data(), rhs.data(), lhs.get_byte_size()) == 0);
}
// Computes the requested (lower or upper) bound estimation for `output` by
// walking the propagation order produced by could_propagate() and calling each
// node's evaluate_lower/evaluate_upper. Results are cached on the descriptor
// tensors; a cached bound is returned immediately if already present.
// \param output   Node output whose bound is requested.
// \param is_upper true => upper bound, false => lower bound.
// \param invalidate_all_unused_values when true, single-consumer input bounds
//        are dropped after use to limit memory held by cached tensors.
// \return The requested bound tensor, or an empty Tensor if it could not be
//         deduced.
ov::Tensor evaluate_bound(const Output<Node>& output, bool is_upper, bool invalidate_all_unused_values = true) {
// Fast path: bound already cached on the tensor descriptor.
if (is_upper && output.get_tensor().get_upper_value()) {
return output.get_tensor().get_upper_value();
}
if (!is_upper && output.get_tensor().get_lower_value()) {
return output.get_tensor().get_lower_value();
}
std::vector<Node*> order;
if (could_propagate(output, order)) {
// could_propagate returns nodes output-first; evaluation needs inputs-first.
reverse(order.begin(), order.end());
for (const auto& node : order) {
// Pre-allocate one output tensor per node output. For dynamic shapes a
// rank-sized placeholder is used; evaluate() is expected to resize it.
ov::TensorVector outputs;
for (const auto& out : node->outputs()) {
const auto& out_shape = out.get_partial_shape();
auto shape = out_shape.is_static() ? out_shape.to_shape() : Shape(out_shape.rank().get_length());
outputs.emplace_back(out.get_element_type(), shape);
}
if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs)) {
const auto& input_values = node->input_values();
TensorLabelVector output_labels(outputs.size());
// If every input has equal lower/upper bounds, the single evaluation
// result is valid as BOTH bounds of the outputs.
bool same_inputs = std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
auto& t = input.get_tensor();
return t.has_and_set_bound() || are_equal(t.get_lower_value(), t.get_upper_value());
});
for (size_t i = 0; i < outputs.size(); ++i) {
// Cache the computed bound(s), never overwriting an existing cache.
if ((same_inputs || is_upper) && !node->get_output_tensor(i).get_upper_value() && outputs[i]) {
node->get_output_tensor(i).set_upper_value(outputs[i]);
}
if ((same_inputs || !is_upper) && !node->get_output_tensor(i).get_lower_value() && outputs[i]) {
node->get_output_tensor(i).set_lower_value(outputs[i]);
}
// Collapse equal bounds onto one tensor so has_and_set_bound()
// (a data-pointer comparison) reports them as identical.
if (are_equal(node->get_output_tensor(i).get_lower_value(),
node->get_output_tensor(i).get_upper_value())) {
node->get_output_tensor(i).set_lower_value(node->get_output_tensor(i).get_upper_value());
}
}
if (node->evaluate_label(output_labels))
for (size_t i = 0; i < outputs.size(); ++i)
node->get_output_tensor(i).set_value_label(output_labels[i]);
// Drop cached input bounds no longer needed, to cap memory usage.
// Bounds larger than 10 elements are always invalidated.
for (const auto& input : input_values) {
auto& tensor = input.get_tensor();
bool should_invalidate = invalidate_all_unused_values;
if (tensor.get_lower_value() && shape_size(tensor.get_lower_value().get_shape()) > 10)
should_invalidate |= true;
if (tensor.get_upper_value() && shape_size(tensor.get_upper_value().get_shape()) > 10)
should_invalidate |= true;
if (should_invalidate && input.get_target_inputs().size() == 1)
tensor.invalidate_values();
}
propagate_rt_info(node, output);
} else {
// A node failed to evaluate its bound: nothing downstream can be deduced.
break;
}
}
}
// Return whatever was cached (possibly an empty Tensor on failure).
if (is_upper)
return output.get_tensor().get_upper_value();
else
return output.get_tensor().get_lower_value();
}
// Evaluates `node` feeding it the requested bound (selected by the
// `get_bound` member-function pointer, lower or upper) of every input tensor.
// \return false when any input lacks that bound, otherwise the result of
//         node->evaluate().
bool default_bound_evaluator(const ov::Node* node,
                             const ov::Tensor& (ov::descriptor::Tensor::*get_bound)() const,
                             ov::TensorVector& output_values) {
    const auto input_count = node->get_input_size();
    ov::TensorVector bounds;
    bounds.reserve(input_count);
    for (size_t idx = 0; idx < input_count; ++idx) {
        const auto& bound = (node->get_input_tensor(idx).*get_bound)();
        if (!bound) {
            // Missing bound on any input makes the evaluation impossible.
            return false;
        }
        bounds.push_back(bound);
    }
    return node->evaluate(output_values, bounds);
}
// Builds a boolean mask of the same shape as `tensor`, marking the elements
// that compare equal to the constant's value (Equal with default broadcasting).
ov::Tensor equality_mask(const ov::Tensor& tensor, const std::shared_ptr<op::v0::Constant>& constant) {
    // Copy the constant's payload into a Tensor so it can be fed to evaluate().
    auto const_data = ov::Tensor(constant->get_element_type(), constant->get_shape());
    memcpy(const_data.data(), constant->get_data_ptr(), const_data.get_byte_size());

    const auto& eq_input = std::make_shared<op::v0::Parameter>(tensor.get_element_type(), tensor.get_shape());
    auto eq_result = ov::TensorVector{{element::boolean, tensor.get_shape()}};
    op::v1::Equal(eq_input, constant).evaluate(eq_result, ov::TensorVector{tensor, const_data});
    return eq_result.front();
}
// Element-wise logical OR of two tensors with NUMPY broadcasting; the output
// tensor is created with a placeholder shape and resized by evaluate().
ov::Tensor or_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
    const auto lhs_arg = std::make_shared<op::v0::Parameter>(lhs.get_element_type(), lhs.get_shape());
    const auto rhs_arg = std::make_shared<op::v0::Parameter>(rhs.get_element_type(), rhs.get_shape());

    auto or_result = ov::TensorVector{{lhs.get_element_type(), Shape{0}}};
    op::v1::LogicalOr(lhs_arg, rhs_arg, ngraph::op::AutoBroadcastType::NUMPY)
        .evaluate(or_result, ov::TensorVector{lhs, rhs});
    return or_result.front();
}
// Strict weak ordering for std::set<TensorVector> (deduplication of input-bound
// combinations in interval_bound_evaluator).
//
// The previous implementation returned "!std::equal(..., &are_same_tensor)",
// which is NOT a strict weak ordering: for any two non-equivalent vectors both
// cmp(a, b) and cmp(b, a) were true, violating asymmetry — undefined behaviour
// for std::set. Order vectors lexicographically by the identity (data pointer)
// of their tensors instead: two vectors compare equivalent exactly when all
// corresponding tensors share the same storage, which matches the previous
// identity-based deduplication intent (pointer identity, not value equality).
struct TensorVectorCmp {
    bool operator()(const ov::TensorVector& lhs, const ov::TensorVector& rhs) const {
        if (lhs.size() != rhs.size())
            return lhs.size() < rhs.size();
        for (size_t i = 0; i < lhs.size(); ++i) {
            // Null tensors order before any real one (Tensor::data() is not
            // safe to call on an empty tensor).
            const void* l = lhs[i] ? lhs[i].data() : nullptr;
            const void* r = rhs[i] ? rhs[i].data() : nullptr;
            if (l != r)
                // std::less gives a total order even for unrelated pointers.
                return std::less<const void*>()(l, r);
        }
        return false;
    }
};
// Creates a scalar tensor of element type `t` holding the maximum value
// representable by that type (used as the "dynamic/unknown" marker value).
// For a type not listed in the switch the element is left unwritten.
ov::Tensor make_tensor_max_of_type(ov::element::Type_t t) {
// Writes std::numeric_limits<T>::max() of the C++ type backing element type ET
// into the scalar TENSOR.
#define OV_TYPE_TO_MAX_CONST(ET, TENSOR)                                                                   \
    case ET:                                                                                               \
        *(TENSOR.data<fundamental_type_for<ET>>()) = std::numeric_limits<fundamental_type_for<ET>>::max(); \
        break

    auto tensor = ov::Tensor(t, Shape{});
    switch (t) {
        OV_TYPE_TO_MAX_CONST(element::boolean, tensor);
        OV_TYPE_TO_MAX_CONST(element::bf16, tensor);
        OV_TYPE_TO_MAX_CONST(element::f16, tensor);
        OV_TYPE_TO_MAX_CONST(element::f32, tensor);
        OV_TYPE_TO_MAX_CONST(element::f64, tensor);
        OV_TYPE_TO_MAX_CONST(element::i8, tensor);
        OV_TYPE_TO_MAX_CONST(element::i16, tensor);
        OV_TYPE_TO_MAX_CONST(element::i32, tensor);
        OV_TYPE_TO_MAX_CONST(element::i64, tensor);
        OV_TYPE_TO_MAX_CONST(element::u1, tensor);
        OV_TYPE_TO_MAX_CONST(element::u8, tensor);
        OV_TYPE_TO_MAX_CONST(element::u16, tensor);
        OV_TYPE_TO_MAX_CONST(element::u32, tensor);
        OV_TYPE_TO_MAX_CONST(element::u64, tensor);
    default:
        break;
    }
#undef OV_TYPE_TO_MAX_CONST
    return tensor;
}
} // namespace
bool ov::could_propagate(const Output<Node>& output, std::vector<Node*>& order) {
auto status = true;
std::deque<Node*> nodes_to_calculate = {output.get_node()};
order.push_back(output.get_node());
while (status && !nodes_to_calculate.empty()) {
auto current_node = nodes_to_calculate.front();
nodes_to_calculate.pop_front();
if (current_node->inputs().empty() && !is_type<op::v0::Constant>(current_node)) {
status = false;
} else if (!is_type<op::v0::ShapeOf>(current_node) && !is_type<op::v3::ShapeOf>(current_node)) {
// not a leaf, not a shape_of -- continue to search
for (const auto& input_value : current_node->input_values()) {
const auto& input_node = input_value.get_node();
order.push_back(input_node);
nodes_to_calculate.push_front(input_node);
}
}
}
return status;
}
// Returns the lower value estimation of `output` (empty Tensor on failure);
// thin wrapper over evaluate_bound with is_upper = false.
ov::Tensor ov::evaluate_lower_bound(const Output<Node>& output) {
    return evaluate_bound(output, false);
}
// Returns the upper value estimation of `output` (empty Tensor on failure);
// thin wrapper over evaluate_bound with is_upper = true.
ov::Tensor ov::evaluate_upper_bound(const Output<Node>& output) {
    return evaluate_bound(output, true);
}
// Evaluates and returns both bounds of `output` as {lower, upper}.
// The lower pass disables invalidation of unused cached values (third argument
// false) so the intermediate bounds survive for the upper pass that follows.
std::pair<ov::Tensor, ov::Tensor> ov::evaluate_both_bounds(const Output<Node>& output) {
    evaluate_bound(output, false, false);
    evaluate_bound(output, true);
    // Both results are cached on the output's descriptor tensor by evaluate_bound.
    return {output.get_tensor_ptr()->get_lower_value(), output.get_tensor_ptr()->get_upper_value()};
}
// Evaluates `node` on the lower bounds of all its inputs; fails (returns false)
// if any input lower bound is not set.
bool ov::default_lower_bound_evaluator(const Node* node, TensorVector& output_values) {
    return default_bound_evaluator(node, &descriptor::Tensor::get_lower_value, output_values);
}
// Evaluates `node` on the upper bounds of all its inputs; fails (returns false)
// if any input upper bound is not set.
bool ov::default_upper_bound_evaluator(const Node* node, TensorVector& output_values) {
    return default_bound_evaluator(node, &descriptor::Tensor::get_upper_value, output_values);
}
// Estimates both output bounds of a two-input node by evaluating it on every
// combination of the inputs' lower/upper bounds, stacking the results along a
// new leading axis and reducing with ReduceMin/ReduceMax per output.
// Positions where any input bound equals the type maximum are treated as
// dynamic: the upper bound is forced to the output type maximum and the lower
// bound to zero. Results are also cached on the node's output tensors.
bool ov::interval_bound_evaluator(const Node* node,
                                  TensorVector& lower_output_values,
                                  TensorVector& upper_output_values) {
    // TODO: relax for n inputs ?
    OPENVINO_ASSERT(lower_output_values.size() == upper_output_values.size());
    OPENVINO_ASSERT(node->get_input_size() == 2);

    const auto num_of_outputs = node->get_output_size();
    auto low_0 = ov::evaluate_lower_bound(node->get_input_source_output(0));
    auto low_1 = ov::evaluate_lower_bound(node->get_input_source_output(1));
    auto up_0 = ov::evaluate_upper_bound(node->get_input_source_output(0));
    auto up_1 = ov::evaluate_upper_bound(node->get_input_source_output(1));

    // The set deduplicates combinations when some bounds share the same tensor.
    std::set<TensorVector, TensorVectorCmp> input_variants = {{low_0, low_1},
                                                              {low_0, up_1},
                                                              {up_0, low_1},
                                                              {up_0, up_1}};

    // Any missing bound makes interval evaluation impossible.
    for (const auto& variant_of_input_vector : input_variants)
        for (const auto& input_tensor : variant_of_input_vector)
            if (!input_tensor)
                return false;

    if (input_variants.size() == 1)
        // All bound combinations coincide -> one evaluation gives both bounds.
        return node->evaluate(upper_output_values, *input_variants.begin()) &&
               node->evaluate(lower_output_values, *input_variants.begin());

    auto zero = op::v0::Constant::create(element::i64, {1}, {0});
    const auto zero_t = ov::Tensor(element::i64, Shape{1});
    *zero_t.data<int64_t>() = 0;

    // Evaluate each variant and unsqueeze every result along a new leading axis
    // so the variants can be concatenated and reduced along that axis below.
    std::vector<TensorVector> unsqueezed_output_variants;
    for (auto& input_variant : input_variants) {
        TensorVector vector_of_output_variants;
        for (const auto& output : lower_output_values) {
            vector_of_output_variants.emplace_back(output.get_element_type(), output.get_shape());
        }
        node->evaluate(vector_of_output_variants, input_variant);

        TensorVector vector_of_unsqueezed_output_variants;
        for (const auto& output : vector_of_output_variants) {
            if (!output) {
                return false;
            }
            auto unsqueezed_shape = output.get_shape();
            unsqueezed_shape.insert(unsqueezed_shape.begin(), 1);

            auto unsqueezed_outputs = TensorVector{{output.get_element_type(), unsqueezed_shape}};
            auto& unsqueezed = unsqueezed_outputs.front();

            // zero_t is the unsqueeze axis (0).
            op::v0::Unsqueeze().evaluate(unsqueezed_outputs, TensorVector{output, zero_t});
            vector_of_unsqueezed_output_variants.push_back(unsqueezed);
        }
        unsqueezed_output_variants.push_back(vector_of_unsqueezed_output_variants);
    }

    // Input elements equal to the type maximum are treated as dynamic (unknown);
    // build a mask of positions where any of the four input bounds is dynamic.
    auto input_0_maximum_value = ngraph::get_constant_max_of_type(low_0.get_element_type());
    auto input_1_maximum_value = ngraph::get_constant_max_of_type(low_1.get_element_type());
    if (input_0_maximum_value == nullptr || input_1_maximum_value == nullptr)
        return false;

    auto input_0_low_dyn_mask = equality_mask(low_0, input_0_maximum_value);
    auto input_0_up_dyn_mask = equality_mask(up_0, input_0_maximum_value);
    auto input_1_low_dyn_mask = equality_mask(low_1, input_1_maximum_value);
    auto input_1_up_dyn_mask = equality_mask(up_1, input_1_maximum_value);
    auto final_input_dyn_mask = or_tensor(or_tensor(input_0_low_dyn_mask, input_0_up_dyn_mask),
                                          or_tensor(input_1_low_dyn_mask, input_1_up_dyn_mask));

    bool fully_defined = true;
    for (size_t i = 0; i < num_of_outputs; ++i) {
        // Stack the i-th output of every variant along the new leading axis.
        TensorVector all_variants_for_ith_output;
        for (const auto& unsqueezed_output_variant : unsqueezed_output_variants)
            all_variants_for_ith_output.push_back(unsqueezed_output_variant[i]);

        auto concated_shape = all_variants_for_ith_output[0].get_shape();
        concated_shape[0] = all_variants_for_ith_output.size();
        auto concat = Tensor(all_variants_for_ith_output[0].get_element_type(), concated_shape);
        auto concat_out = TensorVector{concat};
        auto c = op::v0::Concat();
        c.set_axis(0);
        c.evaluate(concat_out, all_variants_for_ith_output);

        // Reduce over the variant axis: min -> lower bound, max -> upper bound.
        auto fake_param =
            std::make_shared<op::v0::Parameter>(all_variants_for_ith_output[0].get_element_type(), concated_shape);
        auto reduce_min_op = op::v1::ReduceMin(fake_param, zero, false);
        auto lower_out = ov::TensorVector{lower_output_values[i]};
        reduce_min_op.evaluate(lower_out, {concat, zero_t});

        auto reduce_max_op = op::v1::ReduceMax(fake_param, zero, false);
        auto upper_out = ov::TensorVector{upper_output_values[i]};
        reduce_max_op.evaluate(upper_out, {concat, zero_t});

        if (!upper_output_values[i]) {
            fully_defined = false;
        } else {
            // Where any input was dynamic, force the upper bound to the type maximum.
            const auto output_maximum_value = make_tensor_max_of_type(upper_output_values[i].get_element_type());
            op::v1::Select().evaluate(upper_out, {final_input_dyn_mask, output_maximum_value, upper_output_values[i]});
            node->get_output_tensor(i).set_upper_value(upper_output_values[i]);
        }
        if (!lower_output_values[i]) {
            fully_defined = false;
        } else {
            // Where any input was dynamic, force the lower bound to zero.
            // Can not set to get_constant_min_of_type(lower_output_values[i]->get_element_type())
            // yet
            op::v1::Select().evaluate(lower_out, {final_input_dyn_mask, zero_t, lower_output_values[i]});
            node->get_output_tensor(i).set_lower_value(lower_output_values[i]);
        }
    }
    return fully_defined;
}
bool ov::tensor_is_positive(const Tensor& bound) {
const auto bound_constant =
std::make_shared<op::v0::Constant>(bound.get_element_type(), bound.get_shape(), bound.data());
const auto zero_constant = op::v0::Constant::create(bound.get_element_type(), {1}, {0});
OutputVector greater(1);
bool folded = std::make_shared<op::v1::Greater>(bound_constant, zero_constant)
->constant_fold(greater, {bound_constant, zero_constant});
OPENVINO_ASSERT(folded);
auto axes_vector = std::vector<int64_t>(greater[0].get_shape().size());
std::iota(axes_vector.begin(), axes_vector.end(), 0);
const auto axes = op::v0::Constant::create(element::i64, {axes_vector.size()}, axes_vector);
OutputVector all(1);
folded = std::make_shared<op::v1::ReduceLogicalAnd>(greater[0], axes)->constant_fold(all, {greater[0], axes});
OPENVINO_ASSERT(folded && ov::is_type<op::v0::Constant>(all[0].get_node_shared_ptr()));
OPENVINO_ASSERT(all[0].get_shape() == Shape{});
return std::dynamic_pointer_cast<op::v0::Constant>(all[0].get_node_shared_ptr())->cast_vector<bool>()[0];
}
// A constant trivially has equal bounds; otherwise evaluate both bounds and
// check they refer to the same tensor (identity comparison, not value equality).
bool ov::has_and_set_equal_bounds(const Output<Node>& source) {
    if (!op::util::is_constant(source.get_node_shared_ptr())) {
        const auto bounds = ov::evaluate_both_bounds(source);
        return are_same_tensor(bounds.first, bounds.second);
    }
    return true;
}

View File

@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "bound_evaluation_util.hpp"

namespace ov {

// NOTE(review): declaration kept commented out — presumably could_propagate is
// already exposed through bound_evaluation_util.hpp; confirm before removing.
// bool could_propagate(const Output<Node>& output, std::vector<Node*>& order);

/// \brief Checks if all the elements of the bound Tensor are positive
///
/// \param bound Tensor with bound values to check.
/// \return True when every element is strictly greater than zero, otherwise false.
bool tensor_is_positive(const Tensor& bound);

/// \brief Estimates upper bound for node output tensors using only upper bounds of the nodes
/// inputs.
///
/// \param node Operation to be performed
/// \param output_values Vector of Tensors representing resulting upper value estimations
/// \return boolean status if value evaluation was successful.
bool default_upper_bound_evaluator(const Node* node, TensorVector& output_values);

/// \brief Estimates lower bound for node output tensors using only lower bounds of the nodes
/// inputs.
///
/// \param node Operation to be performed
/// \param output_values Vector of Tensors representing resulting lower value estimations
/// \return boolean status if value evaluation was successful.
bool default_lower_bound_evaluator(const Node* node, TensorVector& output_values);

/// \brief Estimates both bounds for node output tensors using both bounds of inputs. Works for
/// operations with two inputs (in_1 and in_2). Brute forces all the pairs of bounds for inputs
/// and evaluates all of them: {in_1_lower, in_2 lower}, {in_1_lower, in_2 upper}, {in_1_upper,
/// in_2_lower}, {in_1_upper, in_2_upper}. Lower and upper values are selected from all the
/// outputs calculated using input pairs.
///
/// \param node Operation to be performed
/// \param lower_output_values Vector of Tensors representing resulting lower value estimations
/// \param upper_output_values Vector of Tensors representing resulting upper value estimations
/// \return boolean status if value evaluation was successful.
bool interval_bound_evaluator(const Node* node, TensorVector& lower_output_values, TensorVector& upper_output_values);

/// \brief Checks if lower and upper bounds of the corresponding tensor are set (not nullptr)
/// and pointers are the same. It doesn't check if lower and upper values are the same relying
/// only on pointers comparison.
///
/// \param source Node output whose descriptor tensor bounds are inspected.
/// \return True for a constant source or when both bounds share the same tensor.
bool has_and_set_equal_bounds(const Output<Node>& source);
}  // namespace ov

View File

@ -52,22 +52,22 @@ void ov::descriptor::Tensor::set_partial_shape(const PartialShape& partial_shape
OPENVINO_SUPPRESS_DEPRECATED_END
void ov::descriptor::Tensor::invalidate_values() {
m_upper_value = nullptr;
m_lower_value = nullptr;
m_upper_value = {};
m_lower_value = {};
m_value_label.clear();
}
void ov::descriptor::Tensor::set_lower_value(const ngraph::HostTensorPtr& value) {
NGRAPH_CHECK(value != nullptr);
NGRAPH_CHECK(m_partial_shape.same_scheme(value->get_partial_shape()));
NGRAPH_CHECK(m_element_type == value->get_element_type());
void ov::descriptor::Tensor::set_lower_value(const ov::Tensor& value) {
OPENVINO_ASSERT(static_cast<bool>(value));
OPENVINO_ASSERT(m_partial_shape.same_scheme(value.get_shape()));
OPENVINO_ASSERT(m_element_type == value.get_element_type());
m_lower_value = value;
}
void ov::descriptor::Tensor::set_upper_value(const ngraph::HostTensorPtr& value) {
NGRAPH_CHECK(value != nullptr);
NGRAPH_CHECK(m_partial_shape.same_scheme(value->get_partial_shape()));
NGRAPH_CHECK(m_element_type == value->get_element_type());
void ov::descriptor::Tensor::set_upper_value(const ov::Tensor& value) {
OPENVINO_ASSERT(static_cast<bool>(value));
OPENVINO_ASSERT(m_partial_shape.same_scheme(value.get_shape()));
OPENVINO_ASSERT(m_element_type == value.get_element_type());
m_upper_value = value;
}

View File

@ -6,12 +6,12 @@
#include <memory>
#include <ngraph/rt_info.hpp>
#include <ngraph/validation_util.hpp>
#include <sstream>
#include <typeindex>
#include <typeinfo>
#include "atomic_guard.hpp"
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/constant.hpp"
@ -756,47 +756,21 @@ bool ov::Node::evaluate(ov::TensorVector& output_values,
}
bool ov::Node::evaluate_lower(ov::TensorVector& output_values) const {
HostTensorVector output = create_tmp_tensors(output_values);
OPENVINO_SUPPRESS_DEPRECATED_START
bool sts = evaluate_lower(output);
OPENVINO_SUPPRESS_DEPRECATED_END
update_output_tensors(output_values, output);
return sts;
const auto& inputs = input_values();
const auto all_have_bounds = std::all_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
return output.get_tensor().has_and_set_bound();
});
return all_have_bounds && ov::default_lower_bound_evaluator(this, output_values);
}
bool ov::Node::evaluate_upper(ov::TensorVector& output_values) const {
HostTensorVector output = create_tmp_tensors(output_values);
OPENVINO_SUPPRESS_DEPRECATED_START
bool sts = evaluate_upper(output);
OPENVINO_SUPPRESS_DEPRECATED_END
update_output_tensors(output_values, output);
return sts;
}
OPENVINO_SUPPRESS_DEPRECATED_START
bool ov::Node::evaluate_lower(const HostTensorVector& output_values) const {
const auto& inputs = input_values();
bool dyn_inputs = std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
return !output.get_tensor().has_and_set_bound();
const auto all_have_bounds = std::all_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
return output.get_tensor().has_and_set_bound();
});
if (dyn_inputs)
return false;
return ngraph::default_lower_bound_evaluator(this, output_values);
return all_have_bounds && ov::default_upper_bound_evaluator(this, output_values);
}
bool ov::Node::evaluate_upper(const HostTensorVector& output_values) const {
const auto& inputs = input_values();
bool dyn_inputs = std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
return !output.get_tensor().has_and_set_bound();
});
if (dyn_inputs)
return false;
return ngraph::default_upper_bound_evaluator(this, output_values);
}
OPENVINO_SUPPRESS_DEPRECATED_END
bool ov::Node::evaluate_label(TensorLabelVector& output_labels) const {
return false;
}

View File

@ -78,6 +78,17 @@ bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVect
return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
bool op::v1::Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v1_Add_evaluate);
if (std::none_of(inputs.cbegin(), inputs.cend(), [](const ov::Tensor& t) {
return is_vector(t.get_shape()) && t.get_shape().front() == 0;
})) {
return BinaryElementwiseArithmetic::evaluate(outputs, inputs);
} else {
return true;
}
}
bool op::v1::Add::has_evaluate() const {
OV_OP_SCOPE(v1_Add_has_evaluate);
switch (get_input_element_type(0)) {

View File

@ -6,6 +6,7 @@
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/clamp.hpp"
#include "ngraph/util.hpp"
@ -139,10 +140,10 @@ bool op::Clamp::visit_attributes(AttributeVisitor& visitor) {
return true;
}
bool op::Clamp::evaluate_lower(const HostTensorVector& output_values) const {
return default_lower_bound_evaluator(this, output_values);
bool op::Clamp::evaluate_lower(ov::TensorVector& output_values) const {
return ov::default_lower_bound_evaluator(this, output_values);
}
bool op::Clamp::evaluate_upper(const HostTensorVector& output_values) const {
return default_upper_bound_evaluator(this, output_values);
bool op::Clamp::evaluate_upper(ov::TensorVector& output_values) const {
return ov::default_upper_bound_evaluator(this, output_values);
}

View File

@ -6,6 +6,7 @@
#include <memory>
#include "bound_evaluate.hpp"
#include "concat_shape_inference.hpp"
#include "dimension_tracker.hpp"
#include "itt.hpp"
@ -109,16 +110,44 @@ bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVecto
return evaluate_concat(inputs, outputs[0], concat_axis);
}
bool op::Concat::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
OV_OP_SCOPE(v0_Concat_evaluate);
OPENVINO_ASSERT(!inputs.empty());
OPENVINO_ASSERT(outputs.size() == 1);
auto concat_axis = ov::normalize(get_axis(), inputs.front().get_shape().size());
std::vector<const char*> arg_bufs;
std::vector<ov::Shape> arg_shapes;
ov::Shape out_shape(inputs.front().get_shape());
out_shape[concat_axis] = 0;
for (auto& input : inputs) {
arg_bufs.push_back(static_cast<const char*>(input.data()));
arg_shapes.push_back(input.get_shape());
out_shape[concat_axis] += arg_shapes.back()[concat_axis];
}
outputs.front().set_shape(out_shape);
ngraph::runtime::reference::concat(arg_bufs,
static_cast<char*>(outputs.front().data()),
arg_shapes,
out_shape,
concat_axis,
outputs.front().get_element_type().size());
return true;
}
bool op::Concat::has_evaluate() const {
OV_OP_SCOPE(v0_Concat_has_evaluate);
return true;
}
bool op::Concat::evaluate_lower(const HostTensorVector& output_values) const {
bool op::Concat::evaluate_lower(ov::TensorVector& output_values) const {
return default_lower_bound_evaluator(this, output_values);
}
bool op::Concat::evaluate_upper(const HostTensorVector& output_values) const {
bool op::Concat::evaluate_upper(ov::TensorVector& output_values) const {
return default_upper_bound_evaluator(this, output_values);
}
@ -131,7 +160,7 @@ bool op::Concat::evaluate_label(TensorLabelVector& output_labels) const {
return false;
}
HostTensorVector idx_inputs;
TensorVector idx_inputs;
idx_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
auto input_label = input.get_tensor().get_value_label();
@ -142,12 +171,16 @@ bool op::Concat::evaluate_label(TensorLabelVector& output_labels) const {
const auto& num_elements = shape_size(shape.to_shape());
input_label.resize(num_elements, no_label);
}
const auto& constant = Constant::create(element::u64, input.get_shape(), input_label);
idx_inputs.push_back(std::make_shared<HostTensor>(constant));
idx_inputs.emplace_back(element::from<label_t>(), input.get_shape());
std::copy_n(input_label.begin(), idx_inputs.back().get_size(), idx_inputs.back().data<ov::label_t>());
}
const auto& output_tensor = std::make_shared<HostTensor>(element::u64, get_output_shape(0));
evaluate({output_tensor}, idx_inputs);
output_labels[0] = std::make_shared<Constant>(output_tensor)->cast_vector<ov::label_t>();
return true;
auto outputs = TensorVector{{element::from<label_t>(), get_output_shape(0)}};
if (evaluate(outputs, idx_inputs)) {
output_labels.front() =
TensorLabel(outputs.front().data<label_t>(), outputs.front().data<label_t>() + outputs.front().get_size());
return true;
} else {
return false;
}
}

View File

@ -552,6 +552,7 @@ bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) {
bool ov::op::v0::Constant::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
OV_OP_SCOPE(v0_Constant_evaluate);
auto output = outputs[0];
output->set_shape(get_shape());
output->write(get_data_ptr(), output->get_size_in_bytes());
return true;
}
@ -561,9 +562,9 @@ bool ov::op::v0::Constant::has_evaluate() const {
return true;
}
bool ov::op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const {
bool ov::op::v0::Constant::evaluate_lower(TensorVector& outputs) const {
return evaluate(outputs, {});
}
bool ov::op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const {
bool ov::op::v0::Constant::evaluate_upper(TensorVector& outputs) const {
return evaluate(outputs, {});
}

View File

@ -42,9 +42,7 @@ shared_ptr<Node> op::Convert::clone_with_new_inputs(const OutputVector& new_args
namespace convert {
namespace {
template <element::Type_t INPUT_ET, element::Type_t OUTPUT_ET>
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out)
{
bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out) {
out->set_shape(arg->get_shape());
size_t element_count = shape_size(out->get_shape());
@ -125,19 +123,20 @@ bool evaluate_convert(const HostTensorPtr& arg, const HostTensorPtr& out) {
return rc;
}
bool evaluate_bound(const Node* node, const HostTensorVector& output_values, bool is_upper) {
NGRAPH_CHECK(node, validate_host_tensor_vector(output_values, 1));
bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper) {
NGRAPH_CHECK(node, output_values.size() == 1);
const auto& input = node->input_value(0);
if (const auto& value = is_upper ? input.get_tensor().get_upper_value() : input.get_tensor().get_lower_value()) {
OPENVINO_SUPPRESS_DEPRECATED_START
if (is_vector(value.get_shape()) && (value.get_shape().front() == 0)) {
return true;
}
bool status = node->evaluate(output_values, {value});
OPENVINO_SUPPRESS_DEPRECATED_END
if (!status)
return status;
const auto& input_element_type = input.get_element_type();
const auto& output_element_type = output_values[0]->get_element_type();
const auto& output_element_type = output_values[0].get_element_type();
if ((input_element_type.is_integral() && input_element_type.bitwidth() <= 16) ||
(output_element_type.is_integral() && output_element_type.bitwidth() <= 16)) {
return status;
@ -145,19 +144,25 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
// constants for dynamic values translation
auto input_maximum_value = get_constant_max_of_type(input_element_type);
auto output_maximum_value = get_constant_max_of_type(output_values[0]->get_element_type());
auto output_maximum_value = get_constant_max_of_type(output_values[0].get_element_type());
if (input_maximum_value == nullptr || output_maximum_value == nullptr)
return false;
auto input_max = ov::Tensor(input_maximum_value->get_element_type(), input_maximum_value->get_shape());
memcpy(input_max.data(), input_maximum_value->get_data_ptr(), input_max.get_byte_size());
auto output_max = ov::Tensor(output_maximum_value->get_element_type(), output_maximum_value->get_shape());
memcpy(output_max.data(), output_maximum_value->get_data_ptr(), output_max.get_byte_size());
// dynamic values translation
auto input_dynamic_mask = std::make_shared<HostTensor>(element::boolean, input.get_shape());
status =
op::v1::Equal().evaluate({input_dynamic_mask}, {value, std::make_shared<HostTensor>(input_maximum_value)});
auto input_dynamic_mask = ov::Tensor(element::boolean, input.get_shape());
auto outputs = ov::TensorVector{input_dynamic_mask};
status = op::v1::Equal().evaluate(outputs, {value, input_max});
if (!status)
return status;
status = op::v1::Select().evaluate(
output_values,
{input_dynamic_mask, std::make_shared<HostTensor>(output_maximum_value), output_values[0]});
status = op::v1::Select().evaluate(output_values, {input_dynamic_mask, output_max, output_values[0]});
return status;
} else
return false;
@ -219,11 +224,11 @@ bool op::v0::Convert::has_evaluate() const {
return true;
}
bool op::v0::Convert::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v0::Convert::evaluate_lower(ov::TensorVector& output_values) const {
return convert::evaluate_bound(this, output_values, false);
}
bool op::v0::Convert::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v0::Convert::evaluate_upper(ov::TensorVector& output_values) const {
return convert::evaluate_bound(this, output_values, true);
}

View File

@ -6,12 +6,12 @@
#include <convolution_shape_inference.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
using namespace std;

View File

@ -6,6 +6,7 @@
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/op/and.hpp"
#include "ngraph/op/equal.hpp"
@ -15,7 +16,6 @@
#include "ngraph/op/select.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/divide.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
@ -60,28 +60,32 @@ bool evaluate_divide(const HostTensorPtr& arg0,
return rc;
}
HostTensorPtr equality_mask(const HostTensorPtr& tensor, const shared_ptr<op::Constant>& constant) {
auto mask = std::make_shared<HostTensor>(element::boolean, tensor->get_shape());
const auto& param = std::make_shared<op::Parameter>(tensor->get_element_type(), tensor->get_shape());
op::v1::Equal(param, constant, ngraph::op::AutoBroadcastType::NUMPY)
.evaluate({mask}, {tensor, std::make_shared<HostTensor>(constant)});
return mask;
ov::Tensor equality_mask(const ov::Tensor& tensor, const shared_ptr<op::Constant>& constant) {
auto mask_out = ov::TensorVector{{element::boolean, tensor.get_shape()}};
auto c_tensor = ov::Tensor(constant->get_element_type(), constant->get_shape());
memcpy(c_tensor.data(), constant->get_data_ptr(), c_tensor.get_byte_size());
const auto& param = std::make_shared<op::Parameter>(tensor.get_element_type(), tensor.get_shape());
op::v1::Equal(param, constant).evaluate(mask_out, ov::TensorVector{tensor, c_tensor});
return mask_out.front();
}
HostTensorPtr or_tensor(const HostTensorPtr& lhs, const HostTensorPtr& rhs) {
auto result = std::make_shared<HostTensor>();
op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs->get_element_type(), lhs->get_shape()),
std::make_shared<op::Parameter>(rhs->get_element_type(), rhs->get_shape()),
ov::Tensor or_tensor(const ov::Tensor& lhs, const ov::Tensor& rhs) {
auto outs = ov::TensorVector{{lhs.get_element_type(), Shape{0}}};
op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs.get_element_type(), lhs.get_shape()),
std::make_shared<op::Parameter>(rhs.get_element_type(), rhs.get_shape()),
ngraph::op::AutoBroadcastType::NUMPY)
.evaluate({result}, {lhs, rhs});
return result;
.evaluate(outs, ov::TensorVector{lhs, rhs});
return outs.front();
}
bool evaluate_bound(const Node* node, const HostTensorVector& output_values, bool is_upper) {
bool evaluate_bound(const Node* node, ov::TensorVector& output_values, bool is_upper) {
// for positive arg2 divide will have limits [low/up , up/low]
// for negative arg2 limits for divide will be [up/low, low/up]
// for arg2 range with both positive and negative values, divide can give any result [-inf, inf]
NGRAPH_CHECK(node, validate_host_tensor_vector(output_values, 1));
NGRAPH_CHECK(node, output_values.size() == 1);
const auto& input1 = node->input_value(0);
const auto& input2 = node->input_value(1);
@ -90,128 +94,132 @@ bool evaluate_bound(const Node* node, const HostTensorVector& output_values, boo
NGRAPH_CHECK(PartialShape::broadcast_merge_into(input_shape, input2.get_partial_shape(), node->get_autob()),
"Argument shapes in divide operation are inconsistent.");
std::shared_ptr<HostTensor> input1_low = evaluate_lower_bound(input1);
auto input1_low = ov::evaluate_lower_bound(input1);
if (!input1_low)
return false;
std::shared_ptr<HostTensor> input1_up = evaluate_upper_bound(input1);
auto input1_up = ov::evaluate_upper_bound(input1);
if (!input1_up)
return false;
std::shared_ptr<HostTensor> input2_low = evaluate_lower_bound(input2);
auto input2_low = ov::evaluate_lower_bound(input2);
if (!input2_low)
return false;
std::shared_ptr<HostTensor> input2_up = evaluate_upper_bound(input2);
auto input2_up = ov::evaluate_upper_bound(input2);
if (!input2_up)
return false;
auto zeros_const = op::Constant::create(input2.get_element_type(), {}, {0});
const auto zero_t = ov::Tensor(input2.get_element_type(), Shape{});
memcpy(zero_t.data(), zeros_const->get_data_ptr(), zero_t.get_byte_size());
auto max_constant = get_constant_max_of_type(input2.get_element_type());
auto dynamic_mask = or_tensor(equality_mask(input1_up, max_constant), equality_mask(input2_up, max_constant));
// mask to find out positive values for arg2
auto input2_positive_up_mask = std::make_shared<HostTensor>(element::boolean, input2.get_shape());
// mask to find out ranges around 0 for arg2
auto input2_low_negative_up_positive_mask = std::make_shared<HostTensor>(element::boolean, input2.get_shape());
auto less_up_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_positive_up_mask = less_up_outputs.front();
bool status =
op::v1::Less().evaluate({input2_positive_up_mask}, {std::make_shared<HostTensor>(zeros_const), input2_up});
bool status = op::v1::Less().evaluate(less_up_outputs, ov::TensorVector{zero_t, input2_up});
if (!status)
return status;
// mask to find out negative values for arg2
auto input2_negative_low_mask = std::make_shared<HostTensor>(element::boolean, input2.get_shape());
status =
op::v1::Less().evaluate({input2_negative_low_mask}, {input2_low, std::make_shared<HostTensor>(zeros_const)});
auto less_low_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_negative_low_mask = less_low_outputs.front();
status = op::v1::Less().evaluate(less_low_outputs, {input2_low, zero_t});
if (!status)
return status;
status = op::v1::LogicalAnd().evaluate({input2_low_negative_up_positive_mask},
{input2_negative_low_mask, input2_positive_up_mask});
// mask to find out ranges around 0 for arg2
auto logical_and_up_outputs = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_low_negative_up_positive_mask = logical_and_up_outputs.front();
status = op::v1::LogicalAnd().evaluate(logical_and_up_outputs, {input2_negative_low_mask, input2_positive_up_mask});
if (!status)
return status;
auto value1_outs = ov::TensorVector{{input1.get_element_type(), input_shape.get_shape()}};
auto& value1 = value1_outs.front();
auto value2_outs = ov::TensorVector{{input2.get_element_type(), input2.get_shape()}};
auto& value2 = value2_outs.front();
if (!is_upper) {
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_low, input1_up});
status = op::v1::Select().evaluate(value1_outs, {input2_positive_up_mask, input1_low, input1_up});
if (!status)
return status;
auto value2 = std::make_shared<HostTensor>(input2.get_element_type(), input2.get_shape());
status = op::v1::Select().evaluate({value2}, {input2_positive_up_mask, input2_up, input2_low});
status = op::v1::Select().evaluate(value2_outs, {input2_positive_up_mask, input2_up, input2_low});
if (!status)
return status;
OPENVINO_SUPPRESS_DEPRECATED_START
status = node->evaluate(output_values, {value1, value2});
OPENVINO_SUPPRESS_DEPRECATED_END
status = node->evaluate(output_values, ov::TensorVector{value1, value2});
if (!status)
return status;
// replace values where zeros inside range of second arg to maximum values
auto output_minimum_value = get_constant_min_of_type(output_values[0]->get_element_type());
auto output_minimum_value = get_constant_min_of_type(output_values[0].get_element_type());
if (output_minimum_value == nullptr)
return false;
auto out_min_v = ov::Tensor(output_minimum_value->get_element_type(), output_minimum_value->get_shape());
memcpy(out_min_v.data(), output_minimum_value->get_data_ptr(), out_min_v.get_byte_size());
status = op::v1::Select().evaluate(output_values,
{input2_low_negative_up_positive_mask,
std::make_shared<HostTensor>(output_minimum_value),
output_values[0]});
{input2_low_negative_up_positive_mask, out_min_v, output_values[0]});
if (!status)
return status;
status = op::v1::Select().evaluate(output_values,
{dynamic_mask, std::make_shared<HostTensor>(zeros_const), output_values[0]});
status = op::v1::Select().evaluate(output_values, {dynamic_mask, zero_t, output_values[0]});
if (!status)
return status;
} else {
auto value1 = std::make_shared<HostTensor>(input1.get_element_type(), input_shape);
status = op::v1::Select().evaluate({value1}, {input2_positive_up_mask, input1_up, input1_low});
status = op::v1::Select().evaluate(value1_outs, {input2_positive_up_mask, input1_up, input1_low});
if (!status)
return status;
auto value2 = std::make_shared<HostTensor>(input2.get_element_type(), input2.get_shape());
status = op::v1::Select().evaluate({value2}, {input2_positive_up_mask, input2_low, input2_up});
status = op::v1::Select().evaluate(value2_outs, {input2_positive_up_mask, input2_low, input2_up});
if (!status)
return status;
// create mask where zeros in the second argument are placed
auto input2_zeros_mask = std::make_shared<HostTensor>(element::boolean, input2.get_shape());
bool status =
op::v1::Equal().evaluate({input2_zeros_mask}, {value2, std::make_shared<HostTensor>(zeros_const)});
auto eq_zero_mask = ov::TensorVector{{element::boolean, input2.get_shape()}};
auto& input2_zeros_mask = eq_zero_mask.front();
bool status = op::v1::Equal().evaluate(eq_zero_mask, {value2, zero_t});
if (!status)
return status;
// replace zeros by 1 values to get result of divide for other values of arguments
auto ones = op::Constant::create(input2.get_element_type(), input2.get_shape(), {1});
status = op::v1::Select().evaluate({value2}, {input2_zeros_mask, std::make_shared<HostTensor>(ones), value2});
auto ones_t = ov::Tensor(ones->get_element_type(), ones->get_shape());
memcpy(ones_t.data(), ones->get_data_ptr(), ones_t.get_byte_size());
status = op::v1::Select().evaluate(value2_outs, {input2_zeros_mask, ones_t, value2});
if (!status)
return status;
OPENVINO_SUPPRESS_DEPRECATED_START
status = node->evaluate(output_values, {value1, value2});
OPENVINO_SUPPRESS_DEPRECATED_END
if (!status)
return status;
// replace values where zeros were found in the second argument to maximum values
auto output_maximum_value = get_constant_max_of_type(output_values[0]->get_element_type());
auto output_maximum_value = get_constant_max_of_type(output_values[0].get_element_type());
if (output_maximum_value == nullptr)
return false;
status = op::v1::Select().evaluate(
output_values,
{input2_zeros_mask, std::make_shared<HostTensor>(output_maximum_value), output_values[0]});
auto out_max_v = ov::Tensor(output_maximum_value->get_element_type(), output_maximum_value->get_shape());
memcpy(out_max_v.data(), output_maximum_value->get_data_ptr(), out_max_v.get_byte_size());
status = op::v1::Select().evaluate(output_values, {input2_zeros_mask, out_max_v, output_values[0]});
if (!status)
return status;
// replace values where zeros inside [low, ip] values range of second arg to maximum values
status = op::v1::Select().evaluate(output_values,
{input2_low_negative_up_positive_mask,
std::make_shared<HostTensor>(output_maximum_value),
output_values[0]});
{input2_low_negative_up_positive_mask, out_max_v, output_values[0]});
if (!status)
return status;
// in case input elements were dynamic we replace them with zero
status = op::v1::Select().evaluate(
output_values,
{dynamic_mask, std::make_shared<HostTensor>(output_maximum_value), output_values[0]});
status = op::v1::Select().evaluate(output_values, {dynamic_mask, out_max_v, output_values[0]});
if (!status)
return status;
}
@ -271,10 +279,10 @@ bool op::v1::Divide::has_evaluate() const {
return false;
}
bool ov::op::v1::Divide::evaluate_lower(const HostTensorVector& outputs) const {
bool ov::op::v1::Divide::evaluate_lower(TensorVector& outputs) const {
return divide::evaluate_bound(this, outputs, false);
}
bool ov::op::v1::Divide::evaluate_upper(const HostTensorVector& outputs) const {
bool ov::op::v1::Divide::evaluate_upper(TensorVector& outputs) const {
return divide::evaluate_bound(this, outputs, true);
}

View File

@ -6,9 +6,9 @@
#include <convolution_shape_inference.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
using namespace std;

View File

@ -5,8 +5,8 @@
#include "ngraph/op/non_max_suppression.hpp"
#include <cstring>
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/constant.hpp"

View File

@ -56,15 +56,19 @@ void op::v3::NonZero::validate_and_infer_types() {
set_input_is_relevant_to_shape(0);
if (const auto& input_constant =
get_constant_from_source(input_value(0))) { // input_value is available to calculate output shape
if (const auto& input_constant = get_constant_from_source(input_value(0))) {
// input_value is available to calculate output shape
const auto& input_data = std::make_shared<HostTensor>(input_constant);
auto output = std::make_shared<HostTensor>(m_output_type, get_output_partial_shape(0));
if (!evaluate({output}, {input_data}))
return;
set_output_type(0, m_output_type, output->get_partial_shape());
get_output_tensor(0).set_lower_value(output);
get_output_tensor(0).set_upper_value(output);
auto t = Tensor(output->get_element_type(), output->get_shape());
memcpy(t.data(), output->get_data_ptr(), t.get_byte_size());
get_output_tensor(0).set_lower_value(t);
get_output_tensor(0).set_upper_value(t);
}
}

View File

@ -8,6 +8,7 @@
#include <iterator>
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/util/op_types.hpp"

View File

@ -4,6 +4,7 @@
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/op/min.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
@ -79,14 +80,10 @@ bool op::v1::ReduceMin::has_evaluate() const {
return false;
}
bool op::v1::ReduceMin::evaluate_lower(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
bool op::v1::ReduceMin::evaluate_lower(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::ReduceMin::evaluate_upper(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);
bool op::v1::ReduceMin::evaluate_upper(ov::TensorVector& output_values) const {
return input_value(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}

View File

@ -4,8 +4,7 @@
#include "ngraph/op/reduce_prod.hpp"
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/util/evaluate_helpers.hpp"
@ -87,22 +86,22 @@ bool op::v1::ReduceProd::has_evaluate() const {
return false;
}
bool op::v1::ReduceProd::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v1::ReduceProd::evaluate_lower(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
HostTensorPtr lb = input_value(0).get_tensor().get_lower_value(),
ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !host_tensor_is_positive(lb) || !host_tensor_is_positive(ub))
const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub))
return false;
return default_lower_bound_evaluator(this, output_values);
}
bool op::v1::ReduceProd::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v1::ReduceProd::evaluate_upper(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
return false;
HostTensorPtr lb = input_value(0).get_tensor().get_lower_value(),
ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !host_tensor_is_positive(lb) || !host_tensor_is_positive(ub))
const auto &lb = input_value(0).get_tensor().get_lower_value(), ub = input_value(0).get_tensor().get_upper_value();
if (!lb || !ub || !tensor_is_positive(lb) || !tensor_is_positive(ub))
return false;
return default_upper_bound_evaluator(this, output_values);
}

View File

@ -8,6 +8,7 @@
#include <dimension_tracker.hpp>
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "compare.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
@ -32,11 +33,16 @@ bool evaluate_reshape(const HostTensorPtr& arg0, const HostTensorPtr& out, const
template <element::Type_t ET>
void compute_output_shape(const HostTensorPtr& shape_pattern, std::vector<int64_t>& output_shape) {
using T = typename element_type_traits<ET>::value_type;
T* shape_pattern_ptr = shape_pattern->get_data_ptr<ET>();
size_t output_rank = shape_pattern->get_shape().empty() ? 0 : shape_pattern->get_shape()[0];
size_t output_rank;
if (shape_pattern->get_partial_shape().is_static()) {
output_rank = shape_pattern->get_shape().empty() ? 0 : shape_pattern->get_shape()[0];
} else {
// Can be dynamic during shape infer as conversion result from empty ov::Tensor
output_rank = 0;
}
for (size_t i = 0; i < output_rank; i++) {
output_shape.push_back(shape_pattern_ptr[i]);
output_shape.push_back(shape_pattern->get_data_ptr<ET>()[i]);
}
}
} // namespace
@ -81,11 +87,13 @@ void op::v1::Reshape::validate_and_infer_types() {
bool shape_can_be_calculated = false;
int64_t minus_one_idx = -1;
HostTensorPtr lb, ub;
ov::Tensor lb, ub;
std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1));
if (lb && ub) {
const auto lower_bound = std::make_shared<op::v0::Constant>(lb)->cast_vector<int64_t>();
auto upper_bound = std::make_shared<op::v0::Constant>(ub)->cast_vector<int64_t>();
const auto lower_bound = std::make_shared<op::v0::Constant>(lb.get_element_type(), lb.get_shape(), lb.data())
->cast_vector<int64_t>();
auto upper_bound = std::make_shared<op::v0::Constant>(ub.get_element_type(), ub.get_shape(), ub.data())
->cast_vector<int64_t>();
shape_can_be_calculated = true;
NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
const TensorLabel& labels = get_input_source_output(1).get_tensor().get_value_label();
@ -205,16 +213,12 @@ bool op::v1::Reshape::has_evaluate() const {
return false;
}
bool op::v1::Reshape::evaluate_lower(const HostTensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
bool op::v1::Reshape::evaluate_lower(ov::TensorVector& output_values) const {
return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::Reshape::evaluate_upper(const HostTensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);
bool op::v1::Reshape::evaluate_upper(ov::TensorVector& output_values) const {
return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}
bool op::v1::Reshape::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -4,6 +4,7 @@
#include "ngraph/op/scatter_update.hpp"
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/scatter_update.hpp"
#include "ngraph/shape.hpp"
@ -92,13 +93,13 @@ bool op::v3::ScatterUpdate::evaluate(const HostTensorVector& outputs, const Host
return evaluate_scatter_update(outputs, inputs);
}
bool op::v3::ScatterUpdate::evaluate_lower(const HostTensorVector& outputs) const {
bool op::v3::ScatterUpdate::evaluate_lower(ov::TensorVector& outputs) const {
OV_OP_SCOPE(v3_ScatterUpdate_evaluate_lower);
return get_input_tensor(1).has_and_set_bound() && get_input_tensor(3).has_and_set_bound() &&
default_lower_bound_evaluator(this, outputs);
}
bool op::v3::ScatterUpdate::evaluate_upper(const HostTensorVector& outputs) const {
bool op::v3::ScatterUpdate::evaluate_upper(ov::TensorVector& outputs) const {
OV_OP_SCOPE(v3_ScatterUpdate_evaluate_upper);
return get_input_tensor(1).has_and_set_bound() && get_input_tensor(3).has_and_set_bound() &&
default_upper_bound_evaluator(this, outputs);
@ -136,12 +137,11 @@ bool scatter_label_evaluator(const Node* node, TensorLabelVector& output_labels)
return false;
}
constexpr auto element_type = (sizeof(ov::label_t) == 8) ? element::u64 : element::u32;
std::vector<ov::runtime::Tensor> input_tensors;
ov::TensorVector input_tensors;
input_tensors.reserve(input_values.size());
auto make_input_label = [&](const Output<Node>& input, TensorLabel& labels) {
input_tensors.emplace_back(element_type, input.get_shape());
input_tensors.emplace_back(ov::element::from<ov::label_t>(), input.get_shape());
labels.resize(shape_size(input.get_shape()));
memcpy(input_tensors.back().data(), labels.data(), input_tensors.back().get_byte_size());
};
@ -153,17 +153,14 @@ bool scatter_label_evaluator(const Node* node, TensorLabelVector& output_labels)
} else if (i == updates_in_idx) {
make_input_label(input, updates_labels);
} else {
const auto host_tensor_ptr = input.get_tensor().get_lower_value();
input_tensors.emplace_back(host_tensor_ptr->get_element_type(),
host_tensor_ptr->get_shape(),
host_tensor_ptr->get_data_ptr());
input_tensors.push_back(input.get_tensor().get_lower_value());
}
}
ov::TensorVector output_tensors{ov::Tensor(element_type, node->get_output_shape(0))};
ov::TensorVector output_tensors{ov::Tensor(ov::element::from<ov::label_t>(), node->get_output_shape(0))};
if (node->evaluate(output_tensors, input_tensors)) {
auto ptr = static_cast<ov::label_t*>(output_tensors[0].data(element_type));
output_labels[0] = ov::TensorLabel(ptr, ptr + output_tensors[0].get_size());
output_labels[0] = ov::TensorLabel(output_tensors[0].data<ov::label_t>(),
output_tensors[0].data<ov::label_t>() + output_tensors[0].get_size());
return true;
}
return false;

View File

@ -58,6 +58,12 @@ inline bool evaluate(const ov::Shape& shape, const HostTensorPtr& output_value)
return true;
}
template <element::Type_t ET>
inline bool evaluate(const ov::Shape& shape, ov::Tensor& output_value) {
runtime::reference::shape_of(shape, output_value.data<fundamental_type_for<ET>>());
return true;
}
bool evaluate_shape_of(const HostTensorPtr& output_value, const HostTensorPtr& input_value) {
bool rc = true;
ov::Shape shape = input_value->get_shape();
@ -74,6 +80,21 @@ bool evaluate_shape_of(const HostTensorPtr& output_value, const HostTensorPtr& i
return rc;
}
bool evaluate_shape_of(ov::Tensor& output_value, const Shape& input_shape) {
bool rc;
output_value.set_shape(ov::Shape{input_shape.size()});
switch (output_value.get_element_type()) {
NGRAPH_TYPE_CASE(evaluate_shape_of, i32, input_shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, i64, input_shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, u32, input_shape, output_value);
NGRAPH_TYPE_CASE(evaluate_shape_of, u64, input_shape, output_value);
default:
rc = false;
break;
}
return rc;
}
bool constant_fold_shape_of(Node* shape_of_node, Output<Node>& replacement, const Output<Node>& shape_of_input) {
auto partial_shape = shape_of_input.get_partial_shape();
auto output_type = shape_of_node->get_output_element_type(0);
@ -89,8 +110,8 @@ bool constant_fold_shape_of(Node* shape_of_node, Output<Node>& replacement, cons
return false;
}
bool evaluate_bound_shape(const Node* shape_of_node, const HostTensorVector& output_values, bool is_upper) {
NGRAPH_CHECK(shape_of_node, validate_host_tensor_vector(output_values, 1));
bool evaluate_bound_shape(const Node* shape_of_node, ov::TensorVector& output_values, bool is_upper) {
NGRAPH_CHECK(shape_of_node, output_values.size() == 1);
const auto& input_partial_shape = shape_of_node->get_input_partial_shape(0);
if (input_partial_shape.rank().is_dynamic())
return false;
@ -103,49 +124,33 @@ bool evaluate_bound_shape(const Node* shape_of_node, const HostTensorVector& out
: interval.get_max_val();
}
NGRAPH_CHECK(pshape_up.is_static() && pshape_low.is_static());
const auto input_et = shape_of_node->get_input_element_type(0);
const auto output_et = shape_of_node->get_output_element_type(0);
if (pshape_low.to_shape() == pshape_up.to_shape()) {
OPENVINO_SUPPRESS_DEPRECATED_START
shape_of_node->evaluate(output_values, {std::make_shared<HostTensor>(input_et, pshape_low)});
OPENVINO_SUPPRESS_DEPRECATED_END
shape_of::evaluate_shape_of(output_values[0], pshape_low.to_shape());
shape_of_node->get_output_tensor(0).set_lower_value(output_values[0]);
shape_of_node->get_output_tensor(0).set_upper_value(output_values[0]);
} else {
HostTensorVector upper =
is_upper ? output_values
: HostTensorVector{
std::make_shared<HostTensor>(output_et, ov::PartialShape{pshape_up.rank().get_length()})};
OPENVINO_SUPPRESS_DEPRECATED_START
shape_of_node->evaluate(upper, {std::make_shared<HostTensor>(input_et, pshape_up)});
OPENVINO_SUPPRESS_DEPRECATED_END
auto&& upper = is_upper ? output_values : ov::TensorVector{{output_et, Shape{pshape_up.to_shape().size()}}};
shape_of::evaluate_shape_of(upper[0], pshape_up.to_shape());
shape_of_node->get_output_tensor(0).set_upper_value(upper[0]);
HostTensorVector lower =
!is_upper ? output_values
: HostTensorVector{
std::make_shared<HostTensor>(output_et, ov::PartialShape{pshape_low.rank().get_length()})};
OPENVINO_SUPPRESS_DEPRECATED_START
shape_of_node->evaluate(lower, {std::make_shared<HostTensor>(input_et, pshape_low)});
OPENVINO_SUPPRESS_DEPRECATED_END
auto&& lower = is_upper ? ov::TensorVector{{output_et, Shape{pshape_low.to_shape().size()}}} : output_values;
shape_of::evaluate_shape_of(lower[0], pshape_low.to_shape());
shape_of_node->get_output_tensor(0).set_lower_value(lower[0]);
vector<bool> dynamic_mask; // true if dimension is dynamic
vector<char> dynamic_mask; // true if dimension is dynamic
for (const auto& i : input_partial_shape)
dynamic_mask.push_back(Dimension(i.get_interval().get_max_val()).is_dynamic());
auto mask_const = ngraph::op::Constant::create(element::boolean, {dynamic_mask.size()}, dynamic_mask);
auto dynamic_min_const = ngraph::op::Constant::create(output_et, {}, {0});
auto dynamic_max_const = ngraph::op::Constant::create(
output_et,
{},
{output_et == element::i64 ? std::numeric_limits<int64_t>::max() : std::numeric_limits<int32_t>::max()});
dynamic_mask.push_back(static_cast<char>(Dimension(i.get_interval().get_max_val()).is_dynamic()));
op::v1::Select().evaluate(
lower,
{std::make_shared<HostTensor>(mask_const), std::make_shared<HostTensor>(dynamic_min_const), lower[0]});
op::v1::Select().evaluate(
upper,
{std::make_shared<HostTensor>(mask_const), std::make_shared<HostTensor>(dynamic_max_const), upper[0]});
const auto mask_const = ov::Tensor(element::boolean, Shape{dynamic_mask.size()}, dynamic_mask.data());
auto&& min = output_et == element::i64 ? static_cast<int64_t>(0) : static_cast<int32_t>(0);
auto&& max =
output_et == element::i64 ? std::numeric_limits<int64_t>::max() : std::numeric_limits<int32_t>::max();
op::v1::Select().evaluate(lower, {mask_const, {output_et, Shape{}, &min}, lower.front()});
op::v1::Select().evaluate(upper, {mask_const, {output_et, Shape{}, &max}, upper.front()});
}
return true;
}
@ -173,6 +178,14 @@ bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values, const Host
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
bool op::v3::ShapeOf::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const {
OV_OP_SCOPE(v0_ShapeOf_evaluate);
OPENVINO_ASSERT(input_values.size() == 1);
OPENVINO_ASSERT(output_values.size() == 1);
return shape_of::evaluate_shape_of(output_values[0], input_values[0].get_shape());
}
bool op::v3::ShapeOf::has_evaluate() const {
OV_OP_SCOPE(v3_ShapeOf_has_evaluate);
switch (get_output_element_type(0)) {
@ -187,11 +200,11 @@ bool op::v3::ShapeOf::has_evaluate() const {
return false;
}
bool op::v3::ShapeOf::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v3::ShapeOf::evaluate_lower(ov::TensorVector& output_values) const {
return shape_of::evaluate_bound_shape(this, output_values, false);
}
bool op::v3::ShapeOf::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v3::ShapeOf::evaluate_upper(ov::TensorVector& output_values) const {
return shape_of::evaluate_bound_shape(this, output_values, true);
}
@ -243,6 +256,14 @@ bool op::v0::ShapeOf::evaluate(const HostTensorVector& output_values, const Host
return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}
bool op::v0::ShapeOf::evaluate(ov::TensorVector& output_values, const ov::TensorVector& input_values) const {
OV_OP_SCOPE(v0_ShapeOf_evaluate);
OPENVINO_ASSERT(input_values.size() == 1);
OPENVINO_ASSERT(output_values.size() == 1);
return shape_of::evaluate_shape_of(output_values[0], input_values[0].get_shape());
}
bool op::v0::ShapeOf::has_evaluate() const {
OV_OP_SCOPE(v0_ShapeOf_has_evaluate);
switch (get_output_element_type(0)) {
@ -265,11 +286,11 @@ bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVec
return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0]);
}
bool op::v0::ShapeOf::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v0::ShapeOf::evaluate_lower(ov::TensorVector& output_values) const {
return shape_of::evaluate_bound_shape(this, output_values, false);
}
bool op::v0::ShapeOf::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v0::ShapeOf::evaluate_upper(ov::TensorVector& output_values) const {
return shape_of::evaluate_bound_shape(this, output_values, true);
}

View File

@ -6,12 +6,12 @@
#include <numeric>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/reference/slice.hpp"
#include "ngraph/validation_util.hpp"
#include "slice_shape_inference.hpp"
using namespace std;
@ -205,16 +205,12 @@ bool slice_input_check(const ov::Node* node) {
}
} // namespace
bool op::v8::Slice::evaluate_lower(const HostTensorVector& output_values) const {
if (!slice_input_check(this))
return false;
return default_lower_bound_evaluator(this, output_values);
bool op::v8::Slice::evaluate_lower(ov::TensorVector& output_values) const {
return slice_input_check(this) && default_lower_bound_evaluator(this, output_values);
}
bool op::v8::Slice::evaluate_upper(const HostTensorVector& output_values) const {
if (!slice_input_check(this))
return false;
return default_upper_bound_evaluator(this, output_values);
bool op::v8::Slice::evaluate_upper(ov::TensorVector& output_values) const {
return slice_input_check(this) && default_upper_bound_evaluator(this, output_values);
}
bool op::v8::Slice::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -7,6 +7,7 @@
#include <numeric>
#include <split_shape_inference.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/builder/split.hpp"
@ -102,13 +103,13 @@ bool op::v1::Split::has_evaluate() const {
return get_input_element_type(1).is_integral_number();
}
bool op::v1::Split::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v1::Split::evaluate_lower(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v1_Split_evaluate_lower);
return input(1).get_tensor().has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::Split::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v1::Split::evaluate_upper(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v1_Split_evaluate_upper);
return input(1).get_tensor().has_and_set_bound() && default_upper_bound_evaluator(this, output_values);

View File

@ -9,10 +9,10 @@
#include <functional>
#include <set>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/reference/copy.hpp"
#include "ngraph/validation_util.hpp"
#include "squeeze_shape_inference.hpp"
using namespace std;
@ -109,19 +109,15 @@ bool op::v0::Squeeze::has_evaluate() const {
}
}
bool op::v0::Squeeze::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v0::Squeeze::evaluate_lower(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v0_Squeeze_evaluate_lower);
NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1));
if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
}
bool op::v0::Squeeze::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v0::Squeeze::evaluate_upper(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v0_Squeeze_evaluate_upper);
NGRAPH_CHECK(validate_host_tensor_vector(output_values, 1));
if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);

View File

@ -6,6 +6,7 @@
#include <algorithm>
#include "bound_evaluate.hpp"
#include "compare.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
@ -17,7 +18,6 @@
#include "ngraph/slice_plan.hpp"
#include "ngraph/type/element_type_traits.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
#include "openvino/op/util/precision_sensitive_attribute.hpp"
#include "strided_slice_shape_inference.hpp"
@ -242,16 +242,12 @@ bool strided_slice_input_check(const ov::Node* node) {
}
} // namespace
bool op::v1::StridedSlice::evaluate_lower(const HostTensorVector& output_values) const {
if (!strided_slice_input_check(this))
return false;
return default_lower_bound_evaluator(this, output_values);
bool op::v1::StridedSlice::evaluate_lower(ov::TensorVector& output_values) const {
return strided_slice_input_check(this) && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::StridedSlice::evaluate_upper(const HostTensorVector& output_values) const {
if (!strided_slice_input_check(this))
return false;
return default_upper_bound_evaluator(this, output_values);
bool op::v1::StridedSlice::evaluate_upper(ov::TensorVector& output_values) const {
return strided_slice_input_check(this) && default_upper_bound_evaluator(this, output_values);
}
bool op::v1::StridedSlice::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -4,9 +4,9 @@
#include "ngraph/op/tile.hpp"
#include <ngraph/validation_util.hpp>
#include <tile_shape_inference.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/runtime/reference/tile.hpp"
@ -110,13 +110,13 @@ bool op::v0::Tile::evaluate(const HostTensorVector& outputs, const HostTensorVec
return evaluate_tile(outputs, inputs);
}
bool op::v0::Tile::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v0::Tile::evaluate_lower(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v0_Tile_evaluate_lower);
return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v0::Tile::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v0::Tile::evaluate_upper(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v0_Tile_evaluate_upper);
return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);

View File

@ -4,6 +4,7 @@
#include "ngraph/op/transpose.hpp"
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/transpose.hpp"
#include "ngraph/validation_util.hpp"
@ -83,11 +84,11 @@ bool op::v1::Transpose::has_evaluate() const {
return get_input_element_type(1).is_integral_number();
}
bool op::v1::Transpose::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v1::Transpose::evaluate_lower(ov::TensorVector& output_values) const {
return get_input_tensor(ORDER).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::Transpose::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v1::Transpose::evaluate_upper(ov::TensorVector& output_values) const {
return get_input_tensor(ORDER).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}

View File

@ -8,9 +8,9 @@
#include <functional>
#include <set>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/copy.hpp"
#include "ngraph/validation_util.hpp"
#include "unsqueeze_shape_inference.hpp"
using namespace std;
@ -126,16 +126,12 @@ bool op::v0::Unsqueeze::has_evaluate() const {
return false;
}
bool op::v0::Unsqueeze::evaluate_lower(const HostTensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
bool op::v0::Unsqueeze::evaluate_lower(ov::TensorVector& output_values) const {
return get_input_tensor(1).has_and_set_bound() && default_lower_bound_evaluator(this, output_values);
}
bool op::v0::Unsqueeze::evaluate_upper(const HostTensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);
bool op::v0::Unsqueeze::evaluate_upper(ov::TensorVector& output_values) const {
return get_input_tensor(1).has_and_set_bound() && default_upper_bound_evaluator(this, output_values);
}
bool op::v0::Unsqueeze::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -4,8 +4,7 @@
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/util/elementwise_args.hpp"
@ -46,24 +45,24 @@ bool ov::op::util::BinaryElementwiseArithmetic::visit_attributes(AttributeVisito
return true;
}
bool ov::op::util::BinaryElementwiseArithmetic::evaluate_upper(const HostTensorVector& output_values) const {
NGRAPH_CHECK(ngraph::validate_host_tensor_vector(output_values, 1));
HostTensorVector lower_output_tensors;
bool ov::op::util::BinaryElementwiseArithmetic::evaluate_upper(ov::TensorVector& output_values) const {
OPENVINO_ASSERT(output_values.size() == 1);
TensorVector lower_output_tensors;
for (const auto& output : output_values)
lower_output_tensors.push_back(
std::make_shared<HostTensor>(output->get_element_type(), output->get_partial_shape()));
if (!ngraph::interval_bound_evaluator(this, lower_output_tensors, output_values))
lower_output_tensors.emplace_back(output.get_element_type(), output.get_shape());
if (!interval_bound_evaluator(this, lower_output_tensors, output_values))
return false;
return true;
}
bool ov::op::util::BinaryElementwiseArithmetic::evaluate_lower(const HostTensorVector& output_values) const {
NGRAPH_CHECK(ngraph::validate_host_tensor_vector(output_values, 1));
HostTensorVector upper_output_tensors;
bool ov::op::util::BinaryElementwiseArithmetic::evaluate_lower(ov::TensorVector& output_values) const {
OPENVINO_ASSERT(output_values.size() == 1);
TensorVector upper_output_tensors;
for (const auto& output : output_values)
upper_output_tensors.push_back(
std::make_shared<HostTensor>(output->get_element_type(), output->get_partial_shape()));
if (!ngraph::interval_bound_evaluator(this, output_values, upper_output_tensors))
upper_output_tensors.emplace_back(output.get_element_type(), output.get_shape());
if (!interval_bound_evaluator(this, output_values, upper_output_tensors))
return false;
return true;
}

View File

@ -7,6 +7,7 @@
#include <ngraph/validation_util.hpp>
#include <numeric>
#include "bound_evaluate.hpp"
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/concat.hpp"
@ -233,7 +234,7 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() {
" doesn't match rank of input tensor ",
input_rank);
if (output_shape_defined && ngraph::has_and_set_equal_bounds(input_value(2))) {
if (output_shape_defined && has_and_set_equal_bounds(input_value(2))) {
auto axes_mapping_val = get_constant_from_source(input_value(2))->get_axis_vector_val();
validate_target_shape_none(arg_shape, axes_mapping_val, output_shape);
}
@ -478,16 +479,16 @@ bool ov::op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, cons
return evaluate_broadcast(inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
}
bool ov::op::util::BroadcastBase::evaluate_lower(const HostTensorVector& output_values) const {
bool ov::op::util::BroadcastBase::evaluate_lower(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() ||
(get_input_size() > 2 && !input_value(2).get_tensor().has_and_set_bound()))
return false;
return ngraph::default_lower_bound_evaluator(this, output_values);
return default_lower_bound_evaluator(this, output_values);
}
bool ov::op::util::BroadcastBase::evaluate_upper(const HostTensorVector& output_values) const {
bool ov::op::util::BroadcastBase::evaluate_upper(ov::TensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() ||
(get_input_size() > 2 && !input_value(2).get_tensor().has_and_set_bound()))
return false;
return ngraph::default_upper_bound_evaluator(this, output_values);
return default_upper_bound_evaluator(this, output_values);
}

View File

@ -4,8 +4,7 @@
#include "ngraph/op/util/gather_base.hpp"
#include <ngraph/validation_util.hpp>
#include "bound_evaluate.hpp"
#include "gather_shape_inference.hpp"
#include "itt.hpp"
#include "ngraph/op/concat.hpp"
@ -243,16 +242,16 @@ bool ov::op::util::GatherBase::evaluate(const HostTensorVector& outputs, const H
return gather::evaluate_gather(inputs[0], inputs[1], outputs[0], axis, batch_dims);
}
bool ov::op::util::GatherBase::evaluate_lower(const HostTensorVector& output_values) const {
bool ov::op::util::GatherBase::evaluate_lower(ov::TensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound())
return false;
return ngraph::default_lower_bound_evaluator(this, output_values);
return default_lower_bound_evaluator(this, output_values);
}
bool ov::op::util::GatherBase::evaluate_upper(const HostTensorVector& output_values) const {
bool ov::op::util::GatherBase::evaluate_upper(ov::TensorVector& output_values) const {
if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound())
return false;
return ngraph::default_upper_bound_evaluator(this, output_values);
return default_upper_bound_evaluator(this, output_values);
}
bool ov::op::util::GatherBase::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -6,6 +6,7 @@
#include <numeric>
#include "bound_evaluate.hpp"
#include "compare.hpp"
#include "itt.hpp"
#include "ngraph/runtime/reference/slice.hpp"
@ -127,16 +128,16 @@ bool op::v1::VariadicSplit::has_axis_and_splits_bound_set() const {
return true;
}
bool op::v1::VariadicSplit::evaluate_lower(const HostTensorVector& output_values) const {
bool op::v1::VariadicSplit::evaluate_lower(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v1_Split_evaluate_lower);
return has_evaluate() && has_axis_and_splits_bound_set() && default_lower_bound_evaluator(this, output_values);
return has_axis_and_splits_bound_set() && default_lower_bound_evaluator(this, output_values);
}
bool op::v1::VariadicSplit::evaluate_upper(const HostTensorVector& output_values) const {
bool op::v1::VariadicSplit::evaluate_upper(ov::TensorVector& output_values) const {
OV_OP_SCOPE(v1_Split_evaluate_upper);
return has_evaluate() && has_axis_and_splits_bound_set() && default_upper_bound_evaluator(this, output_values);
return has_axis_and_splits_bound_set() && default_upper_bound_evaluator(this, output_values);
}
bool op::v1::VariadicSplit::evaluate_label(TensorLabelVector& output_labels) const {

View File

@ -161,7 +161,9 @@ bool ov::pass::ConstantFolding::pre_calculated_values_folding(const std::shared_
for (auto& output : curr_node->input_values()) {
if (is_output_foldable(output) && output.get_tensor().has_and_set_bound()) {
auto input_node = output.get_node_shared_ptr();
auto replacement = std::make_shared<ov::op::v0::Constant>(output.get_tensor().get_lower_value());
const auto& lower = output.get_tensor().get_lower_value();
auto replacement =
std::make_shared<ov::op::v0::Constant>(lower.get_element_type(), lower.get_shape(), lower.data());
if (replacement && !ov::is_type<ov::op::v0::Constant>(input_node)) {
replacement->set_friendly_name(
friendly_name_from(*input_node, input_node->get_output_size(), output.get_index()));

View File

@ -5,12 +5,13 @@
#include "ngraph/validation_util.hpp"
#include <algorithm>
#include <dimension_tracker.hpp>
#include <ngraph/ops.hpp>
#include <ngraph/rt_info.hpp>
#include <numeric>
#include "bound_evaluate.hpp"
#include "compare.hpp"
#include "dimension_tracker.hpp"
#include "ngraph/evaluator.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convert.hpp"
@ -24,6 +25,7 @@
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type_traits.hpp"
#include "ngraph/util.hpp"
#include "openvino/op/ops.hpp"
#include "sequnce_generator.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
@ -1167,162 +1169,15 @@ void ngraph::evaluate_nodes(std::map<RawNodeOutput, HostTensorPtr>& value_map,
}
}
bool ngraph::could_propagate(const Output<Node>& output, std::vector<Node*>& order) {
bool status = true;
std::deque<Node*> nodes_to_calculate = {output.get_node()};
order.push_back(output.get_node());
while (status && !nodes_to_calculate.empty()) {
auto current_node = nodes_to_calculate.front();
nodes_to_calculate.pop_front();
if (current_node->inputs().empty() && !is_type<op::Constant>(current_node))
status = false;
else if (!is_type<op::v0::ShapeOf>(current_node) && !is_type<op::v3::ShapeOf>(current_node)) {
// not a leaf, not a shape_of -- continue to search
for (const auto& input_value : current_node->input_values()) {
const auto& input_node = input_value.get_node();
order.push_back(input_node);
nodes_to_calculate.push_front(input_node);
}
}
}
return status;
}
namespace {
// Propagates runtime info from `node` to consumers of its other outputs once
// all of the node's outputs have fully defined bounds.  `final_port` is the
// output whose consumers triggered the evaluation; they are excluded so the
// rt_info only flows to the "side" consumers that will now see folded values.
void propagate_rt_info(Node* node, const Output<Node>& final_port) {
    auto node_outputs = node->outputs();
    bool same_outputs = std::all_of(node_outputs.begin(), node_outputs.end(), [](const Output<Node>& output) {
        return output.get_tensor().has_and_set_bound();
    });
    if (same_outputs && op::is_constant(node))  // constant should not propagate it's rt_info
    {
        // Consumers reached through `final_port` must not receive the info.
        std::unordered_set<Node*> stop_nodes;
        for (const auto& in : final_port.get_target_inputs())
            stop_nodes.insert(in.get_node());

        auto curr_node = node->shared_from_this();
        for (const auto& output : node_outputs) {
            if (output == final_port)
                continue;
            for (auto& in : output.get_target_inputs()) {
                if (stop_nodes.count(in.get_node()))
                    continue;
                try {
                    auto consumer = in.get_node()->shared_from_this();
                    copy_runtime_info({curr_node, consumer}, consumer);
                } catch (const std::bad_weak_ptr&) {
                    // Exception can be thrown, if `shared_from_this()` was called during node creation.
                    // Continue propagation for other nodes.
                }
            }
        }
    }
}
// Element-wise equality check of two host tensors, used to detect when lower
// and upper bounds coincide.  Returns false (without comparing) when either
// tensor is null or when the tensor is larger than `max_elements_limit`, to
// keep bound bookkeeping cheap.  Shapes and element types must already match.
bool are_equal(const HostTensorPtr& lhs, const HostTensorPtr& rhs, size_t max_elements_limit = 10) {
    if (!lhs || !rhs)
        return false;
    const auto& lhs_shape = lhs->get_shape();
    const auto& rhs_shape = rhs->get_shape();
    OPENVINO_ASSERT(lhs_shape == rhs_shape);
    const auto& lhs_et = lhs->get_element_type();
    const auto& rhs_et = rhs->get_element_type();
    OPENVINO_ASSERT(lhs_et == rhs_et);
    if (shape_size(lhs_shape) > max_elements_limit)
        return false;
    auto mask = std::make_shared<HostTensor>(element::boolean, lhs_shape);
    // One Parameter is reused for both Equal inputs; the actual data is
    // supplied positionally via the evaluate() input vector {lhs, rhs}.
    const auto& param = std::make_shared<op::Parameter>(lhs_et, lhs_shape);
    bool eval_status = op::v1::Equal(param, param, ngraph::op::AutoBroadcastType::NUMPY).evaluate({mask}, {lhs, rhs});
    OPENVINO_ASSERT(eval_status);
    auto equal = op::Constant(mask).cast_vector<bool>();
    // Equal only if every element matched.
    return std::all_of(equal.begin(), equal.end(), [](bool i) {
        return i;
    });
}
// Evaluates the lower or upper value bound of `output` by walking the
// producing sub-graph (as discovered by could_propagate) from sources to the
// target and calling each node's evaluate_lower/evaluate_upper.  Computed
// bounds and labels are cached on the output tensors; intermediate bounds may
// be invalidated afterwards to limit memory use.  Returns the cached bound
// (possibly null if propagation failed).
HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper, bool invalidate_all_unused_values = true) {
    // bound is already set in the tensor
    if (is_upper && output.get_tensor().get_upper_value() != nullptr)
        return output.get_tensor().get_upper_value();
    if (!is_upper && output.get_tensor().get_lower_value() != nullptr)
        return output.get_tensor().get_lower_value();

    std::vector<Node*> order;
    if (could_propagate(output, order)) {
        // could_propagate records target-first; evaluation must go source-first.
        reverse(order.begin(), order.end());
        for (const auto& node : order) {
            HostTensorVector outputs;
            for (const auto& out : node->outputs())
                outputs.push_back(std::make_shared<HostTensor>(out));

            if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs)) {
                const auto& input_values = node->input_values();
                TensorLabelVector output_labels(outputs.size());

                // When all inputs have equal lower/upper bounds the single
                // evaluation is exact, so it can be cached as both bounds.
                bool same_inputs = std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
                    auto& tensor = input.get_tensor();
                    return tensor.has_and_set_bound() || are_equal(tensor.get_lower_value(), tensor.get_upper_value());
                });

                for (size_t i = 0; i < outputs.size(); ++i) {
                    if ((same_inputs || is_upper) && node->get_output_tensor(i).get_upper_value() == nullptr)
                        node->get_output_tensor(i).set_upper_value(outputs[i]);
                    if ((same_inputs || !is_upper) && node->get_output_tensor(i).get_lower_value() == nullptr)
                        node->get_output_tensor(i).set_lower_value(outputs[i]);
                    // Share one tensor for equal bounds so has_and_set_bound()
                    // (pointer comparison) holds downstream.
                    if (are_equal(node->get_output_tensor(i).get_lower_value(),
                                  node->get_output_tensor(i).get_upper_value()))
                        node->get_output_tensor(i).set_lower_value(node->get_output_tensor(i).get_upper_value());
                }

                if (node->evaluate_label(output_labels))
                    for (size_t i = 0; i < outputs.size(); ++i)
                        node->get_output_tensor(i).set_value_label(output_labels[i]);

                // Drop cached bounds that are no longer needed (large tensors
                // are always dropped; others only when the caller allows it),
                // but only when this node is the tensor's sole consumer.
                for (const auto& input : input_values) {
                    auto& tensor = input.get_tensor();
                    bool should_invalidate = invalidate_all_unused_values;
                    if (tensor.get_lower_value() && shape_size(tensor.get_lower_value()->get_shape()) > 10)
                        should_invalidate |= true;

                    if (tensor.get_upper_value() && shape_size(tensor.get_upper_value()->get_shape()) > 10)
                        should_invalidate |= true;

                    if (should_invalidate && input.get_target_inputs().size() == 1)
                        tensor.invalidate_values();
                }
                propagate_rt_info(node, output);
            } else {
                // Node cannot compute this bound -- abort the whole chain.
                break;
            }
        }
    }

    if (is_upper)
        return output.get_tensor().get_upper_value();
    else
        return output.get_tensor().get_lower_value();
}
} // namespace
// Evaluates and returns the lower value bound of `output` (null on failure).
HostTensorPtr ngraph::evaluate_lower_bound(const Output<Node>& output) {
    return evaluate_bound(output, false);
}
// Evaluates and returns the upper value bound of `output` (null on failure).
HostTensorPtr ngraph::evaluate_upper_bound(const Output<Node>& output) {
    return evaluate_bound(output, true);
}
// Evaluates both bounds of `output` and returns them as {lower, upper}.
// The lower pass runs with invalidation disabled so that cached intermediate
// values survive for the subsequent upper-bound pass.
pair<HostTensorPtr, HostTensorPtr> ngraph::evaluate_both_bounds(const Output<Node>& output) {
    evaluate_bound(output, false, false);
    evaluate_upper_bound(output);
    return {output.get_tensor_ptr()->get_lower_value(), output.get_tensor_ptr()->get_upper_value()};
}
bool ov::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& pshape) {
HostTensorPtr lb, ub;
std::tie(lb, ub) = evaluate_both_bounds(output);
Tensor lb, ub;
std::tie(lb, ub) = ov::evaluate_both_bounds(output);
bool shape_defined = false;
if (lb && ub) {
auto lower_bound = std::make_shared<op::v0::Constant>(lb)->cast_vector<int64_t>();
auto upper_bound = std::make_shared<op::v0::Constant>(ub)->cast_vector<int64_t>();
auto lower_bound = std::make_shared<op::v0::Constant>(lb.get_element_type(), lb.get_shape(), lb.data())
->cast_vector<int64_t>();
auto upper_bound = std::make_shared<op::v0::Constant>(ub.get_element_type(), ub.get_shape(), ub.data())
->cast_vector<int64_t>();
NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
const TensorLabel& labels = output.get_tensor().get_value_label();
NGRAPH_CHECK(labels.empty() || lower_bound.size() == labels.size());
@ -1360,9 +1215,7 @@ bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_lab
for (size_t i = 1; i < inputs_count; ++i) {
if (node->get_input_tensor(i).has_and_set_bound()) {
const auto& et = node->get_input_element_type(i);
const auto& shape = node->get_input_shape(i);
inputs.emplace_back(et, shape, node->get_input_tensor(i).get_lower_value()->get_data_ptr());
inputs.push_back(node->get_input_tensor(i).get_lower_value());
} else {
return false;
}
@ -1391,25 +1244,6 @@ bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_lab
return false;
}
inline bool default_bound_evaluator(const Node* node, const HostTensorVector& output_values, bool is_upper) {
HostTensorVector input_tensors;
for (const auto& input : node->input_values()) {
if (auto bound = is_upper ? input.get_tensor().get_upper_value() : input.get_tensor().get_lower_value())
input_tensors.push_back(bound);
else
return false;
}
return node->evaluate(output_values, input_tensors);
}
// Evaluates `node` on its inputs' cached lower bounds into `output_values`.
bool ngraph::default_lower_bound_evaluator(const Node* node, const HostTensorVector& output_values) {
    return default_bound_evaluator(node, output_values, false);
}
// Evaluates `node` on its inputs' cached upper bounds into `output_values`.
bool ngraph::default_upper_bound_evaluator(const Node* node, const HostTensorVector& output_values) {
    return default_bound_evaluator(node, output_values, true);
}
shared_ptr<op::Constant> ngraph::get_constant_max_of_type(element::Type_t t) {
#define NGRAPH_TYPE_TO_MAX_CONST(t) \
case t: \
@ -1493,162 +1327,14 @@ std::shared_ptr<op::Constant> ngraph::get_constant_lowest_of_type(element::Type_
}
}
namespace {
// Builds a boolean mask, same shape as `tensor`, marking elements equal to
// `constant` (NUMPY-broadcast).  Used to flag "dynamic" positions that hold
// the type's maximum sentinel value.
HostTensorPtr equality_mask(const HostTensorPtr& tensor, const shared_ptr<op::Constant>& constant) {
    auto mask = std::make_shared<HostTensor>(element::boolean, tensor->get_shape());
    const auto& param = std::make_shared<op::Parameter>(tensor->get_element_type(), tensor->get_shape());
    op::v1::Equal(param, constant, ngraph::op::AutoBroadcastType::NUMPY)
        .evaluate({mask}, {tensor, std::make_shared<HostTensor>(constant)});
    return mask;
}
// Element-wise logical OR of two boolean host tensors (NUMPY broadcast);
// used to merge per-input dynamic-value masks.
HostTensorPtr or_tensor(const HostTensorPtr& lhs, const HostTensorPtr& rhs) {
    auto result = std::make_shared<HostTensor>();
    op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs->get_element_type(), lhs->get_shape()),
                      std::make_shared<op::Parameter>(rhs->get_element_type(), rhs->get_shape()),
                      ngraph::op::AutoBroadcastType::NUMPY)
        .evaluate({result}, {lhs, rhs});
    return result;
}
} // namespace
// Interval arithmetic for a two-input node: evaluates the node on all four
// {lower, upper} input-bound combinations, then takes the element-wise minimum
// as the output's lower bound and the maximum as its upper bound.  Elements
// whose inputs carry the "dynamic" sentinel (type maximum) are forced to
// [0, type-max] in the result.  Returns true only when both bounds of every
// output were produced and cached on the node.
bool ngraph::interval_bound_evaluator(const Node* node,
                                      const HostTensorVector& lower_output_values,
                                      const HostTensorVector& upper_output_values) {
    // TODO: relax for n inputs ?
    NGRAPH_CHECK(lower_output_values.size() == upper_output_values.size());
    NGRAPH_CHECK(node->get_input_size() == 2);

    const auto num_of_outputs = node->get_output_size();
    std::shared_ptr<HostTensor> low_0 = evaluate_lower_bound(node->get_input_source_output(0));
    std::shared_ptr<HostTensor> low_1 = evaluate_lower_bound(node->get_input_source_output(1));
    std::shared_ptr<HostTensor> up_0 = evaluate_upper_bound(node->get_input_source_output(0));
    std::shared_ptr<HostTensor> up_1 = evaluate_upper_bound(node->get_input_source_output(1));
    // std::set deduplicates variants, e.g. when an input's bounds coincide.
    std::set<HostTensorVector> input_variants = {{low_0, low_1}, {low_0, up_1}, {up_0, low_1}, {up_0, up_1}};

    for (const auto& variant_of_input_vector : input_variants)
        for (const auto& input_tensor : variant_of_input_vector)
            if (input_tensor == nullptr)
                return false;

    if (input_variants.size() == 1)
        // All bounds are identical -- a single exact evaluation serves as both bounds.
        return node->evaluate(upper_output_values, *input_variants.begin()) &&
               node->evaluate(lower_output_values, *input_variants.begin());

    auto zero = op::v0::Constant::create(element::i64, {1}, {0});
    std::vector<HostTensorVector> unsqueezed_output_variants;
    for (auto& input_variant : input_variants) {
        HostTensorVector vector_of_output_variants;
        for (const auto& output : lower_output_values)
            vector_of_output_variants.push_back(
                std::make_shared<HostTensor>(output->get_element_type(), output->get_partial_shape()));

        node->evaluate(vector_of_output_variants, input_variant);

        // Add a leading axis of size 1 so the variants can be concatenated
        // along axis 0 and reduced with ReduceMin/ReduceMax below.
        HostTensorVector vector_of_unsqueezed_output_variants;
        for (const auto& output : vector_of_output_variants) {
            if (!output)
                return false;
            auto unsqueezed_shape = output->get_shape();
            unsqueezed_shape.insert(unsqueezed_shape.begin(), 1);
            const auto unsqueezed = make_shared<HostTensor>(output->get_element_type(), unsqueezed_shape);
            op::v0::Unsqueeze().evaluate({unsqueezed}, {output, make_shared<HostTensor>(zero)});
            vector_of_unsqueezed_output_variants.push_back(unsqueezed);
        }
        unsqueezed_output_variants.push_back(vector_of_unsqueezed_output_variants);
    }

    // Positions equal to the type's maximum are treated as "value unknown".
    auto input_0_maximum_value = get_constant_max_of_type(low_0->get_element_type());
    auto input_1_maximum_value = get_constant_max_of_type(low_1->get_element_type());
    if (input_0_maximum_value == nullptr || input_1_maximum_value == nullptr)
        return false;

    auto input_0_low_dyn_mask = equality_mask(low_0, input_0_maximum_value);
    auto input_0_up_dyn_mask = equality_mask(up_0, input_0_maximum_value);
    auto input_1_low_dyn_mask = equality_mask(low_1, input_1_maximum_value);
    auto input_1_up_dyn_mask = equality_mask(up_1, input_1_maximum_value);

    auto final_input_dyn_mask = or_tensor(or_tensor(input_0_low_dyn_mask, input_0_up_dyn_mask),
                                          or_tensor(input_1_low_dyn_mask, input_1_up_dyn_mask));

    bool fully_defined = true;
    for (size_t i = 0; i < num_of_outputs; ++i) {
        HostTensorVector all_variants_for_ith_output;
        for (const auto& unsqueezed_output_variant : unsqueezed_output_variants)
            all_variants_for_ith_output.push_back(unsqueezed_output_variant[i]);

        auto concated_shape = all_variants_for_ith_output[0]->get_shape();
        concated_shape[0] = all_variants_for_ith_output.size();
        auto concated = make_shared<HostTensor>(all_variants_for_ith_output[0]->get_element_type(), concated_shape);
        auto concat = op::Concat();
        concat.set_axis(0);
        concat.evaluate({concated}, all_variants_for_ith_output);

        // Reduce over the variant axis: min -> lower bound, max -> upper bound.
        auto fake_param =
            make_shared<op::Parameter>(all_variants_for_ith_output[0]->get_element_type(), concated_shape);
        auto reduce_min_op = op::v1::ReduceMin(fake_param, zero, false);
        reduce_min_op.evaluate({lower_output_values[i]}, {concated, make_shared<HostTensor>(zero)});
        auto reduce_max_op = op::v1::ReduceMax(fake_param, zero, false);
        reduce_max_op.evaluate({upper_output_values[i]}, {concated, make_shared<HostTensor>(zero)});

        if (upper_output_values[i] == nullptr)
            fully_defined = false;
        else {
            // Dynamic positions get the sentinel maximum as upper bound.
            auto output_maximum_value = get_constant_max_of_type(upper_output_values[i]->get_element_type());
            op::v1::Select().evaluate(
                {upper_output_values[i]},
                {final_input_dyn_mask, std::make_shared<HostTensor>(output_maximum_value), upper_output_values[i]});
            node->get_output_tensor(i).set_upper_value(upper_output_values[i]);
        }
        if (lower_output_values[i] == nullptr)
            fully_defined = false;
        else {
            auto output_minimum_value = op::Constant::create(lower_output_values[i]->get_element_type(), {}, {0});
            // Can not set to get_constant_min_of_type(lower_output_values[i]->get_element_type())
            // yet
            op::v1::Select().evaluate(
                {lower_output_values[i]},
                {final_input_dyn_mask, std::make_shared<HostTensor>(output_minimum_value), lower_output_values[i]});
            node->get_output_tensor(i).set_lower_value(lower_output_values[i]);
        }
    }
    return fully_defined;
}
// Returns true when every element of `bound` is strictly greater than zero.
// Implemented via constant folding: Greater(bound, 0) followed by a
// ReduceLogicalAnd over all axes, yielding a scalar boolean.
bool ngraph::host_tensor_is_positive(const HostTensorPtr& bound) {
    const auto bound_constant = std::make_shared<op::Constant>(bound);

    const auto zero_constant = op::Constant::create(bound->get_element_type(), {1}, {0});
    OutputVector greater(1);
    bool folded = std::make_shared<op::v1::Greater>(bound_constant, zero_constant)
                      ->constant_fold(greater, {bound_constant, zero_constant});
    NGRAPH_CHECK(folded);

    // Reduce the element-wise comparison over every axis to a single flag.
    auto axes_vector = std::vector<int64_t>(greater[0].get_shape().size());
    std::iota(axes_vector.begin(), axes_vector.end(), 0);
    const auto axes = op::Constant::create(element::i64, {axes_vector.size()}, axes_vector);
    OutputVector all(1);
    folded = std::make_shared<op::v1::ReduceLogicalAnd>(greater[0], axes)->constant_fold(all, {greater[0], axes});
    NGRAPH_CHECK(folded && ov::is_type<op::Constant>(all[0].get_node_shared_ptr()));
    NGRAPH_CHECK(all[0].get_shape() == Shape{});
    return std::dynamic_pointer_cast<op::Constant>(all[0].get_node_shared_ptr())->cast_vector<bool>()[0];
}
// Returns true when `source` has a single, fully defined value: either it is a
// Constant, or its evaluated lower and upper bounds are the same tensor.
// NOTE: `lb == ub` compares shared_ptr identity, not contents -- evaluate_bound
// deliberately aliases the lower bound to the upper one when they are equal,
// so pointer equality is the intended check here.
bool ngraph::has_and_set_equal_bounds(const Output<Node>& source) {
    if (op::is_constant(source.get_node_shared_ptr()))
        return true;
    HostTensorPtr lb, ub;
    std::tie(lb, ub) = evaluate_both_bounds(source);
    return lb && lb == ub;
}
shared_ptr<op::Constant> ov::get_constant_from_source(const Output<Node>& source) {
if (!has_and_set_equal_bounds(source))
return nullptr;
if (const auto& c = ov::as_type_ptr<op::v0::Constant>(source.get_node_shared_ptr()))
return c;
return std::make_shared<op::v0::Constant>(source.get_tensor().get_upper_value());
const auto t = source.get_tensor().get_upper_value();
return std::make_shared<op::v0::Constant>(t.get_element_type(), t.get_shape(), t.data());
}
bool ngraph::validate_host_tensor_vector(const HostTensorVector& tensor_vector, const size_t& size) {

View File

@ -20,18 +20,14 @@ using TestParams = std::tuple<int64_t, LabeledShapeVector>;
class EvaluateLabelTest : public Test {
protected:
ov::element::Type label_dtype{ov::element::u64};
ov::TensorLabelVector out_labels;
bool exp_evaluate_status;
ov::TensorLabelVector out_labels;
ov::TensorVector exp_result, inputs;
std::vector<std::vector<uint64_t>> labels_u64; //!< Storage for tensor labels.
};
class ConcatEvaluateLabelTest : public EvaluateLabelTest, public WithParamInterface<TestParams> {
protected:
void SetUp() override {
exp_result = ov::TensorVector{ov::Tensor(ov::element::u64, {0})};
const auto& labeled_shapes = std::get<1>(GetParam());
exp_evaluate_status =
@ -44,7 +40,7 @@ protected:
bool add_labels;
std::tie(shape, add_labels) = labeled_shape;
auto param = params.make<Parameter>(ov::element::u64, shape);
auto param = params.make<Parameter>(ov::element::from<ov::label_t>(), shape);
if (exp_evaluate_status) {
auto min_shape = shape.get_min_shape();
@ -55,8 +51,8 @@ protected:
param->get_default_output().get_tensor().set_value_label(labels);
}
labels_u64.emplace_back(std::vector<uint64_t>(labels.cbegin(), labels.cend()));
inputs.emplace_back(label_dtype, min_shape, labels_u64.back().data());
inputs.emplace_back(ov::element::from<ov::label_t>(), min_shape);
std::copy_n(labels.begin(), labels.size(), inputs.back().data<ov::label_t>());
}
}
}
@ -102,12 +98,14 @@ INSTANTIATE_TEST_SUITE_P(evaluate_bound,
TEST_P(ConcatEvaluateLabelTest, evaluate_label) {
const auto concat = std::make_shared<Concat>(params.get(), std::get<0>(GetParam()));
out_labels.resize(concat->get_output_size());
exp_result.emplace_back(ov::element::from<ov::label_t>(), concat->get_shape());
ASSERT_EQ(concat->evaluate_label(out_labels), exp_evaluate_status);
if (exp_evaluate_status) {
concat->evaluate(exp_result, inputs);
}
ASSERT_EQ(concat->evaluate_label(out_labels), exp_evaluate_status);
ASSERT_THAT(out_labels.front(),
ElementsAreArray(exp_result.front().data<uint64_t>(), exp_result.front().get_size()));
ASSERT_THAT(out_labels.front(),
ElementsAreArray(exp_result.front().data<ov::label_t>(), exp_result.front().get_size()));
}
}

View File

@ -31,9 +31,9 @@ protected:
std::generate_n(std::back_inserter(lower_values), shape_size(p_shape.get_min_shape()), SeqGen<int32_t>(-10));
std::generate_n(std::back_inserter(upper_values), shape_size(p_shape.get_min_shape()), SeqGen<int32_t>(20));
lower_v_tensor = std::make_shared<HostTensor>(dtype, p_shape.get_min_shape(), lower_values.data());
upper_v_tensor = std::make_shared<HostTensor>(dtype, p_shape.get_min_shape(), upper_values.data());
axes_v_tensor = std::make_shared<HostTensor>(dtype, Shape{axes_order.size()}, axes_order.data());
lower_v_tensor = ov::Tensor(dtype, p_shape.get_min_shape(), lower_values.data());
upper_v_tensor = ov::Tensor(dtype, p_shape.get_min_shape(), upper_values.data());
axes_v_tensor = ov::Tensor(dtype, Shape{axes_order.size()}, axes_order.data());
arg = std::make_shared<Parameter>(dtype, p_shape);
order = std::make_shared<Parameter>(dtype, Shape{axes_order.size()});
@ -43,12 +43,12 @@ protected:
result = exp_result = TensorVector{Tensor(dtype, {0})};
}
void node_set_lower_and_upper(Node* node, const HostTensorPtr& lower, const HostTensorPtr& upper) {
if (lower != nullptr) {
void node_set_lower_and_upper(Node* node, const ov::Tensor& lower, const ov::Tensor& upper) {
if (lower) {
node->get_output_tensor(0).set_lower_value(lower);
}
if (upper != nullptr) {
if (upper) {
node->get_output_tensor(0).set_upper_value(upper);
}
}
@ -58,7 +58,7 @@ protected:
element::Type label_dtype{element::from<label_t>()};
std::vector<int32_t> axes_order, lower_values, upper_values;
HostTensorPtr lower_v_tensor, upper_v_tensor, axes_v_tensor;
ov::Tensor lower_v_tensor, upper_v_tensor, axes_v_tensor;
TensorVector result, exp_result;
std::shared_ptr<Transpose> transpose;
std::shared_ptr<Parameter> arg, order;
@ -92,7 +92,7 @@ TEST_P(TransposeEvalBoundTest, evaluate_lower) {
}
TEST_P(TransposeEvalBoundTest, evaluate_lower_but_arg_lower_values_not_set) {
node_set_lower_and_upper(arg.get(), nullptr, upper_v_tensor);
node_set_lower_and_upper(arg.get(), ov::Tensor(), upper_v_tensor);
node_set_lower_and_upper(order.get(), axes_v_tensor, axes_v_tensor);
ASSERT_FALSE(transpose->evaluate_lower(result));
@ -119,7 +119,7 @@ TEST_P(TransposeEvalBoundTest, evaluate_upper) {
}
TEST_P(TransposeEvalBoundTest, evaluate_upper_but_arg_upper_values_not_set) {
node_set_lower_and_upper(arg.get(), upper_v_tensor, nullptr);
node_set_lower_and_upper(arg.get(), upper_v_tensor, ov::Tensor());
node_set_lower_and_upper(order.get(), axes_v_tensor, axes_v_tensor);
ASSERT_FALSE(transpose->evaluate_upper(result));