Dimension tracking: Label propagation along with partial value propagation (#9834)

This commit is contained in:
Evgenya Stepyreva
2022-01-25 12:35:56 +03:00
committed by GitHub
parent 4a85982427
commit 3bd6eb6dcd
36 changed files with 597 additions and 12 deletions

View File

@@ -19,5 +19,6 @@ namespace ngraph {
namespace descriptor {
/// \brief Compile-time descriptor of a first-class value that is a tensor.
using ov::descriptor::Tensor;
using TensorLabel = std::vector<size_t>;
} // namespace descriptor
} // namespace ngraph

View File

@@ -56,6 +56,8 @@ class HostTensor;
using HostTensor = runtime::HostTensor;
using HostTensorPtr = std::shared_ptr<HostTensor>;
using HostTensorVector = std::vector<HostTensorPtr>;
using TensorLabel = std::vector<size_t>;
using TensorLabelVector = std::vector<TensorLabel>;
namespace op {

View File

@@ -26,6 +26,7 @@ using HostTensorPtr = std::shared_ptr<runtime::HostTensor>;
namespace ov {
class Node;
using TensorLabel = std::vector<size_t>;
namespace descriptor {
/// \brief Compile-time descriptor of a first-class value that is a tensor.
class OPENVINO_API Tensor {
@@ -59,6 +60,8 @@ public:
void set_lower_value(const ngraph::HostTensorPtr& value);
/// \brief sets upper bound value description
void set_upper_value(const ngraph::HostTensorPtr& value);
/// \brief sets value label description
void set_value_label(const TensorLabel& value_label);
/// \brief unsets bound value descriptions
void invalidate_values();
@@ -77,6 +80,10 @@ public:
ngraph::HostTensorPtr get_upper_value() const {
return m_upper_value;
}
/// \brief gets value label description
TensorLabel get_value_label() const {
return m_value_label;
}
/// \brief checks if lower and upper bound are set and point to the same HostTensor
bool has_and_set_bound() const {
return m_upper_value != nullptr && m_upper_value == m_lower_value;
@@ -110,6 +117,7 @@ protected:
PartialShape m_partial_shape;
ngraph::HostTensorPtr m_lower_value, m_upper_value;
TensorLabel m_value_label;
std::string m_name;
std::unordered_set<std::string> m_names;

View File

@@ -65,6 +65,7 @@ class Matcher;
using HostTensor = ngraph::runtime::HostTensor;
using HostTensorPtr = std::shared_ptr<HostTensor>;
using HostTensorVector = std::vector<HostTensorPtr>;
using TensorLabelVector = std::vector<TensorLabel>;
template <typename NodeType>
class Input;
@@ -243,6 +244,7 @@ public:
const ov::EvaluationContext& evaluationContext) const;
virtual bool evaluate_lower(ov::TensorVector& output_values) const;
virtual bool evaluate_upper(ov::TensorVector& output_values) const;
virtual bool evaluate_label(TensorLabelVector& output_labels) const;
virtual bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values);
/// \brief Decomposes the FusedOp into a sub-graph consisting of core openvino ops

View File

@@ -125,4 +125,12 @@ OPENVINO_API bool evaluate_as_partial_shape(const Output<Node>& output, PartialS
/// \brief Runs an estimation of source tensor. If it succeeded to calculate both bounds and
/// they are the same returns Constant operation from the resulting bound, otherwise nullptr.
OPENVINO_API std::shared_ptr<op::v0::Constant> get_constant_from_source(const Output<Node>& source);
/// \brief Propagates value labels from input 0 to the only output through an operation.
/// Not applicable for operations which require values interaction (example: mathematical
/// operations). Could be used for movement operations (example: gathering, shape change)
/// \param node Operation to be performed
/// \param output_labels Vector of TensorLabel objects representing resulting value labels
/// \return boolean status if label evaluation was successful.
OPENVINO_API bool default_label_evaluator(const Node* node, TensorLabelVector& output_labels);
} // namespace ov

View File

@@ -58,6 +58,7 @@ public:
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
protected:
/// \ brief m_axis stores default value for all iterations

View File

@@ -47,6 +47,7 @@ public:
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
protected:
ov::element::Type m_destination_type;

View File

@@ -54,6 +54,7 @@ public:
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
protected:

View File

@@ -40,6 +40,7 @@ public:
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
private:
@@ -70,6 +71,7 @@ public:
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& input_values) override;
};
} // namespace v0

View File

@@ -38,6 +38,11 @@ public:
// TODO: Update to use new evaluate with TensorVector
bool evaluate(const HostTensorVector&, const HostTensorVector&) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
OPENVINO_SUPPRESS_DEPRECATED_START
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
std::shared_ptr<ngraph::op::v0::Constant> get_default_const_axes(const Output<Node>& start) const;
PartialShape calculate_output_shape(const std::vector<int64_t>& starts,

View File

@@ -28,6 +28,7 @@ public:
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

View File

@@ -100,6 +100,7 @@ public:
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
private:
AxisSet convert_mask_to_axis_set(const std::vector<int64_t>& mask) const;

View File

@@ -29,6 +29,7 @@ public:
bool evaluate_lower(const HostTensorVector& output_values) const override;
bool evaluate_upper(const HostTensorVector& output_values) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;

View File

@@ -34,6 +34,7 @@ public:
bool evaluate_lower(const HostTensorVector& outputs) const override;
bool evaluate_upper(const HostTensorVector& outputs) const override;
OPENVINO_SUPPRESS_DEPRECATED_END
bool evaluate_label(TensorLabelVector& output_labels) const override;
bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override;
const int64_t& get_batch_dims() const;

View File

@@ -41,6 +41,7 @@ OPENVINO_SUPPRESS_DEPRECATED_END
void ov::descriptor::Tensor::invalidate_values() {
m_upper_value = nullptr;
m_lower_value = nullptr;
m_value_label.clear();
}
void ov::descriptor::Tensor::set_lower_value(const ngraph::HostTensorPtr& value) {
@@ -57,6 +58,17 @@ void ov::descriptor::Tensor::set_upper_value(const ngraph::HostTensorPtr& value)
m_upper_value = value;
}
// Sets (or clears) the value label of this tensor.
// An empty vector clears any previously set labels; a non-empty vector is
// accepted only when the shape is static and it contains exactly one label
// per tensor element.
void ov::descriptor::Tensor::set_value_label(const TensorLabel& value_label) {
const auto& labels_size = value_label.size();
if (labels_size == 0) {
// empty input resets the label state
m_value_label.clear();
} else {
// labels are stored per element, so the element count must be known and match
NGRAPH_CHECK(m_partial_shape.is_static());
NGRAPH_CHECK(shape_size(m_partial_shape.to_shape()) == labels_size);
m_value_label = value_label;
}
}
const ov::Shape& ov::descriptor::Tensor::get_shape() const {
if (m_partial_shape.is_static()) {
if (m_shape_changed.load(std::memory_order_relaxed)) {

View File

@@ -782,6 +782,10 @@ bool ov::Node::evaluate_upper(const HostTensorVector& output_values) const {
OPENVINO_SUPPRESS_DEPRECATED_END
// Default implementation: a generic node does not propagate value labels.
// Operations that support label propagation override this method.
bool ov::Node::evaluate_label(TensorLabelVector& output_labels) const {
return false;
}
bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "Node::constant_fold");

View File

@@ -136,3 +136,36 @@ bool op::Concat::evaluate_lower(const HostTensorVector& output_values) const {
bool op::Concat::evaluate_upper(const HostTensorVector& output_values) const {
return default_upper_bound_evaluator(this, output_values);
}
// Propagates value labels through Concat by evaluating the operation on
// tensors that contain the labels instead of the values: concatenating the
// label tensors rearranges labels exactly as the values are rearranged.
// Returns false if no input is labeled or if the evaluation fails.
bool op::Concat::evaluate_label(TensorLabelVector& output_labels) const {
    const auto& inputs = input_values();
    // Worth doing only if at least one input carries a non-zero label.
    bool has_labeled_input = std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& out) {
        const auto& labels = out.get_tensor().get_value_label();
        return !labels.empty() && std::any_of(labels.begin(), labels.end(), [](const size_t& l) {
            return l > 0;
        });
    });
    if (!has_labeled_input)
        return false;

    // Pack each input's labels into a u64 host tensor; unlabeled inputs
    // contribute "no label" (0) for each of their elements.
    HostTensorVector idx_inputs;
    idx_inputs.reserve(inputs.size());
    for (const auto& input : inputs) {
        auto input_label = input.get_tensor().get_value_label();
        if (input_label.empty()) {
            const auto& shape = input.get_partial_shape();
            // sanity check. at this point value propagation was successful
            NGRAPH_CHECK(shape.is_static());
            const auto& num_elements = shape_size(shape.to_shape());
            input_label = TensorLabel(num_elements, 0);
        }
        const auto& constant = Constant::create(element::u64, input.get_shape(), input_label);
        idx_inputs.push_back(std::make_shared<HostTensor>(constant));
    }

    const auto& output_tensor = std::make_shared<HostTensor>(element::u64, get_output_shape(0));
    // Propagate only if the evaluation succeeded; otherwise the output tensor
    // content is undefined and must not be interpreted as labels.
    if (!evaluate({output_tensor}, idx_inputs))
        return false;
    output_labels[0] = std::make_shared<Constant>(output_tensor)->cast_vector<size_t>();
    return true;
}

View File

@@ -228,3 +228,11 @@ bool op::v0::Convert::evaluate_lower(const HostTensorVector& output_values) cons
bool op::v0::Convert::evaluate_upper(const HostTensorVector& output_values) const {
return convert::evaluate_bound(this, output_values, true);
}
// Convert changes only the element type, never the element order or count,
// so value labels pass through from input 0 to the output unchanged.
bool op::v0::Convert::evaluate_label(TensorLabelVector& output_labels) const {
    const auto& labels = get_input_tensor(0).get_value_label();
    if (labels.empty())
        return false;
    output_labels[0] = labels;
    return true;
}

View File

@@ -201,17 +201,23 @@ bool op::v1::Reshape::has_evaluate() const {
}
bool op::v1::Reshape::evaluate_lower(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
}
bool op::v1::Reshape::evaluate_upper(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);
}
// Label propagation is possible only when the target-shape input (port 1) has
// a fully defined value (lower bound == upper bound); the actual propagation
// is done by the default evaluator, which runs Reshape on the label tensor.
bool op::v1::Reshape::evaluate_label(TensorLabelVector& output_labels) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_label_evaluator(this, output_labels);
}
bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
if (get_output_partial_shape(0).is_dynamic()) {
return false;

View File

@@ -5,6 +5,7 @@
#include "ngraph/op/shape_of.hpp"
#include <algorithm>
#include <dimension_tracker.hpp>
#include <ngraph/validation_util.hpp>
#include <vector>
@@ -149,6 +150,20 @@ bool evaluate_bound_shape(const Node* shape_of_node, const HostTensorVector& out
}
return true;
}
// Fills output_labels[0] with the per-dimension labels of the input shape:
// element i receives the label tracked on dimension i (0 means "no label").
// Returns true only if at least one dimension carries a label.
bool evaluate_label(const Node* shape_of_node, TensorLabelVector& output_labels) {
const auto& shape = shape_of_node->get_input_partial_shape(0);
NGRAPH_CHECK(shape.rank().is_static());  // sanity check. at this point value propagation was successful
output_labels[0].reserve(shape.size());
bool label_is_set = false;
for (const auto& d : shape) {
const auto& label = ov::DimensionTracker::get_label(d);
if (label)
label_is_set = true;
output_labels[0].push_back(label);
}
return label_is_set;
}
} // namespace
} // namespace shape_of
@@ -181,6 +196,10 @@ bool op::v3::ShapeOf::evaluate_upper(const HostTensorVector& output_values) cons
return shape_of::evaluate_bound_shape(this, output_values, true);
}
// Delegates to the shared ShapeOf label-propagation helper.
bool op::v3::ShapeOf::evaluate_label(TensorLabelVector& output_labels) const {
return shape_of::evaluate_label(this, output_labels);
}
bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v3::ShapeOf::constant_fold");
if (get_rt_info().count(ov::pass::DisableConstantFolding::get_type_info_static()))
@@ -254,3 +273,7 @@ bool op::v0::ShapeOf::evaluate_lower(const HostTensorVector& output_values) cons
bool op::v0::ShapeOf::evaluate_upper(const HostTensorVector& output_values) const {
return shape_of::evaluate_bound_shape(this, output_values, true);
}
// Delegates to the shared ShapeOf label-propagation helper.
bool op::v0::ShapeOf::evaluate_label(TensorLabelVector& output_labels) const {
return shape_of::evaluate_label(this, output_labels);
}

View File

@@ -395,3 +395,35 @@ bool op::v8::Slice::evaluate(const HostTensorVector& outputs, const HostTensorVe
axes);
return true;
}
namespace {
// Bounds/labels for Slice can be computed only when every parameter input —
// start (1), stop (2), step (3) and the optional axes (4) — has a fully
// defined value, i.e. its lower and upper bounds coincide.
bool slice_input_check(const ov::Node* node) {
    for (size_t port = 1; port <= 3; ++port)
        if (!node->get_input_tensor(port).has_and_set_bound())
            return false;
    if (node->get_input_size() == 5 && !node->get_input_tensor(4).has_and_set_bound())
        return false;
    return true;
}
}  // namespace
// Lower-bound estimation is valid only when all parameter inputs are fully defined.
bool op::v8::Slice::evaluate_lower(const HostTensorVector& output_values) const {
if (!slice_input_check(this))
return false;
return default_lower_bound_evaluator(this, output_values);
}
// Upper-bound estimation is valid only when all parameter inputs are fully defined.
bool op::v8::Slice::evaluate_upper(const HostTensorVector& output_values) const {
if (!slice_input_check(this))
return false;
return default_upper_bound_evaluator(this, output_values);
}
// Label propagation follows the same precondition as the bound evaluators.
bool op::v8::Slice::evaluate_label(TensorLabelVector& output_labels) const {
if (!slice_input_check(this))
return false;
return default_label_evaluator(this, output_labels);
}

View File

@@ -262,6 +262,12 @@ bool op::v0::Squeeze::evaluate_upper(const HostTensorVector& output_values) cons
return default_upper_bound_evaluator(this, output_values);
}
// The axes input is optional for Squeeze; when present it must have a fully
// defined value before labels can be propagated by the default evaluator.
bool op::v0::Squeeze::evaluate_label(TensorLabelVector& output_labels) const {
if (get_input_size() > 1 && !get_input_tensor(1).has_and_set_bound())
return false;
return default_label_evaluator(this, output_labels);
}
bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
NGRAPH_OP_SCOPE(v0_Squeeze_constant_fold);
if (get_output_partial_shape(0).is_dynamic()) {

View File

@@ -241,16 +241,29 @@ bool op::v1::StridedSlice::has_evaluate() const {
return get_input_size() == 4;
}
namespace {
// StridedSlice bound/label evaluation requires the begin (1), end (2) and
// strides (3) inputs to have fully defined values (lower == upper bound).
bool strided_slice_input_check(const ov::Node* node) {
    for (size_t port = 1; port <= 3; ++port)
        if (!node->get_input_tensor(port).has_and_set_bound())
            return false;
    return true;
}
}  // namespace
bool op::v1::StridedSlice::evaluate_lower(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() || !input_value(2).get_tensor().has_and_set_bound() ||
!input_value(3).get_tensor().has_and_set_bound())
if (!strided_slice_input_check(this))
return false;
return default_lower_bound_evaluator(this, output_values);
}
bool op::v1::StridedSlice::evaluate_upper(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() || !input_value(2).get_tensor().has_and_set_bound() ||
!input_value(3).get_tensor().has_and_set_bound())
if (!strided_slice_input_check(this))
return false;
return default_upper_bound_evaluator(this, output_values);
}
// Label propagation follows the same precondition as the bound evaluators.
bool op::v1::StridedSlice::evaluate_label(TensorLabelVector& output_labels) const {
if (!strided_slice_input_check(this))
return false;
return default_label_evaluator(this, output_labels);
}

View File

@@ -150,17 +150,23 @@ bool op::v0::Unsqueeze::has_evaluate() const {
}
bool op::v0::Unsqueeze::evaluate_lower(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_lower_bound_evaluator(this, output_values);
}
bool op::v0::Unsqueeze::evaluate_upper(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_upper_bound_evaluator(this, output_values);
}
// The axes input (port 1) must have a fully defined value before labels can
// be propagated by the default evaluator.
bool op::v0::Unsqueeze::evaluate_label(TensorLabelVector& output_labels) const {
if (!get_input_tensor(1).has_and_set_bound())
return false;
return default_label_evaluator(this, output_labels);
}
bool op::v0::Unsqueeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) {
if (get_output_partial_shape(0).is_dynamic()) {
return false;

View File

@@ -241,17 +241,23 @@ bool ov::op::util::GatherBase::evaluate(const HostTensorVector& outputs, const H
}
bool ov::op::util::GatherBase::evaluate_lower(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() || !input_value(2).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound())
return false;
return ngraph::default_lower_bound_evaluator(this, output_values);
}
bool ov::op::util::GatherBase::evaluate_upper(const HostTensorVector& output_values) const {
if (!input_value(1).get_tensor().has_and_set_bound() || !input_value(2).get_tensor().has_and_set_bound())
if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound())
return false;
return ngraph::default_upper_bound_evaluator(this, output_values);
}
// Indices (port 1) and axis (port 2) must have fully defined values before
// labels can be propagated by the default evaluator.
bool ov::op::util::GatherBase::evaluate_label(TensorLabelVector& output_labels) const {
if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound())
return false;
return default_label_evaluator(this, output_labels);
}
bool ov::op::util::GatherBase::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
// try the regular constant folding just for the Gather node
if (Node::constant_fold(output_values, input_values)) {

View File

@@ -5,6 +5,7 @@
#include "ngraph/validation_util.hpp"
#include <algorithm>
#include <dimension_tracker.hpp>
#include <ngraph/ops.hpp>
#include <ngraph/rt_info.hpp>
#include <numeric>
@@ -1243,6 +1244,8 @@ HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper) {
outputs.push_back(std::make_shared<HostTensor>(out));
if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs)) {
const auto& input_values = node->input_values();
TensorLabelVector output_labels(outputs.size());
bool same_inputs = std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
return input.get_tensor().has_and_set_bound();
});
@@ -1253,6 +1256,10 @@ HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper) {
if ((same_inputs || !is_upper) && node->get_output_tensor(i).get_lower_value() == nullptr)
node->get_output_tensor(i).set_lower_value(outputs[i]);
}
if (node->evaluate_label(output_labels))
for (size_t i = 0; i < outputs.size(); ++i)
node->get_output_tensor(i).set_value_label(output_labels[i]);
for (const auto& input : input_values)
if (input.get_target_inputs().size() == 1)
input.get_tensor().invalidate_values();
@@ -1290,6 +1297,9 @@ bool ov::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& psh
auto lower_bound = std::make_shared<op::v0::Constant>(lb)->cast_vector<int64_t>();
auto upper_bound = std::make_shared<op::v0::Constant>(ub)->cast_vector<int64_t>();
NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
const TensorLabel& labels = output.get_tensor().get_value_label();
NGRAPH_CHECK(labels.empty() || lower_bound.size() == labels.size());
vector<Dimension> resulting_pshape(lower_bound.size());
for (size_t i = 0; i < lower_bound.size(); ++i) {
auto low = lower_bound[i], up = upper_bound[i];
@@ -1301,6 +1311,8 @@ bool ov::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& psh
low = std::numeric_limits<std::int64_t>::max();
}
resulting_pshape[i] = {low, up};
if (!labels.empty() && labels[i])
ov::DimensionTracker::set_label(resulting_pshape[i], labels[i]);
}
pshape = PartialShape(resulting_pshape);
shape_defined = true;
@@ -1308,6 +1320,42 @@ bool ov::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& psh
return shape_defined;
}
// Propagates value labels from input 0 to the single output of a
// data-movement style operation: the op is evaluated on a tensor that holds
// the labels instead of the values, so labels are rearranged exactly as the
// values would be. All other inputs act as parameters and must have fully
// defined values. Returns false when propagation is not possible.
bool ov::default_label_evaluator(const Node* node, TensorLabelVector& output_labels) {
    NGRAPH_CHECK(node->outputs().size() == 1);
    const auto& input_values = node->input_values();
    HostTensorVector input_tensors(input_values.size());

    // Input 0 carries the labels; there must be at least one non-zero label,
    // otherwise there is nothing to propagate.
    TensorLabel input_labels = input_values[0].get_tensor().get_value_label();
    const bool all_zero = std::all_of(input_labels.begin(), input_labels.end(), [](const size_t& l) {
        return l == 0;
    });
    if (input_labels.empty() || all_zero)
        return false;
    auto labels_constant = op::v0::Constant::create(ov::element::u64, input_values[0].get_shape(), input_labels);
    input_tensors[0] = std::make_shared<HostTensor>(labels_constant);

    // Remaining inputs are parameters of the operation; each must have a
    // fully defined value (lower bound == upper bound).
    for (size_t i = 1; i < input_values.size(); ++i) {
        const auto& tensor = input_values[i].get_tensor();
        if (!tensor.has_and_set_bound())
            return false;
        input_tensors[i] = tensor.get_lower_value();
    }

    // inputs are finalized
    const auto& output = std::make_shared<HostTensor>(element::u64, node->get_output_partial_shape(0));
    if (!node->evaluate({output}, input_tensors))
        return false;
    output_labels[0] = std::make_shared<op::v0::Constant>(output)->cast_vector<size_t>();
    return true;
}
inline bool default_bound_evaluator(const Node* node, const HostTensorVector& output_values, bool is_upper) {
HostTensorVector input_tensors;
for (const auto& input : node->input_values()) {

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset6.hpp"
@@ -17,6 +19,29 @@ template <typename T>
class BroadcastTests : public ::testing::Test {};
TYPED_TEST_SUITE_P(BroadcastTests);
// Checks that a dimension label set on an input dimension survives the
// ShapeOf -> Gather -> Unsqueeze -> Concat value path and ends up on the
// corresponding output dimension of Broadcast.
TYPED_TEST_P(BroadcastTests, broadcast_dynamic_value_propagation) {
Dimension marked = Dimension(3);
ov::DimensionTracker::set_label(marked, 10);
PartialShape target = PartialShape{1, 2, marked, 4};
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 1});
auto param_1 = make_shared<op::Parameter>(element::f32, target);
auto shape = make_shared<op::ShapeOf>(param_1);
auto indices = op::Constant::create(element::i32, {}, {2});
auto axis = op::Constant::create(element::i32, {1}, {0});
auto gather = make_shared<op::v1::Gather>(shape, indices, axis);
auto unsqueeze = make_shared<op::v0::Unsqueeze>(gather, axis);
auto five = op::Constant::create(element::i64, {1}, {5});
auto target_shape = std::make_shared<op::Concat>(OutputVector{unsqueeze, five}, 0);
auto bc = make_shared<TypeParam>(param, target_shape);
ASSERT_EQ(bc->get_element_type(), element::f32);
ASSERT_EQ(bc->get_shape(), (Shape{3, 5}));
// label 10 from the marked dimension must reach output dimension 0
ASSERT_EQ(ov::DimensionTracker::get_label(bc->get_output_partial_shape(0)[0]), 10);
}
TYPED_TEST_P(BroadcastTests, broadcast_numpy) {
auto param = make_shared<op::Parameter>(element::f32, Shape{3, 1});
auto target_shape = op::Constant::create<int64_t>(element::i64, Shape{3}, {2, 3, 6});
@@ -550,7 +575,8 @@ REGISTER_TYPED_TEST_SUITE_P(BroadcastTests,
broadcast_numpy_input_target_shape_static_rank,
broadcast_numpy_input_static_shape,
broadcast_numpy_input_partially_dynamic,
broadcast_numpy_static_dims_incorrect);
broadcast_numpy_static_dims_incorrect,
broadcast_dynamic_value_propagation);
typedef ::testing::Types<op::v1::Broadcast, op::v3::Broadcast> BroadcastTypes;
// the last empty argument resolves compiler warning on MAC:

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -285,3 +287,69 @@ TEST(type_prop, concat_partial_negative_axis_incorrect) {
FAIL() << "Deduced type check failed for unexpected reason";
}
}
// Two labeled dimensions (10 and 15) flow through ShapeOf and Concat into a
// Broadcast target shape; the labels must land on output dimensions 0 and 4,
// all other dimensions stay unlabeled (0).
TEST(type_prop, concat_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 10);
PartialShape target_0 = PartialShape{marked_0, 4};
Dimension marked_1 = Dimension(5);
ov::DimensionTracker::set_label(marked_1, 15);
PartialShape target_1 = PartialShape{4, marked_1, 9};
auto param = make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = make_shared<op::ShapeOf>(param_0);
auto param_1 = make_shared<op::Parameter>(element::f32, target_1);
auto shape_1 = make_shared<op::ShapeOf>(param_1);
auto five = op::Constant::create(element::i64, {1}, {5});
auto target_shape = std::make_shared<op::Concat>(OutputVector{shape_0, five, shape_1}, 0);
auto bc = make_shared<op::v1::Broadcast>(param, target_shape);
ASSERT_EQ(bc->get_shape(), (Shape{3, 4, 5, 4, 5, 9}));
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[1]), 0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[2]), 0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[3]), 0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[4]), 15);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[5]), 0);
}
// Same scenario as concat_dynamic_value_and_label_propagation, but the shape
// subgraph is squeezed through i8 and converted back to i64: labels (stored
// as metadata, so values like 1000/1500 are unaffected by the narrow i8
// value type) must survive both Converts and the Concat.
TEST(type_prop, concat_dynamic_value_and_label_propagation_1) {
    Dimension marked_0 = Dimension(3);
    ov::DimensionTracker::set_label(marked_0, 1000);
    PartialShape target_0 = PartialShape{marked_0, 4};
    Dimension marked_1 = Dimension(5);
    ov::DimensionTracker::set_label(marked_1, 1500);
    PartialShape target_1 = PartialShape{4, marked_1, 9};
    auto param = make_shared<op::Parameter>(element::f32, Shape{1});
    auto param_0 = make_shared<op::Parameter>(element::f32, target_0);
    auto shape_0 = make_shared<op::ShapeOf>(param_0);
    auto convert_0 = make_shared<op::Convert>(shape_0, element::i8);
    auto param_1 = make_shared<op::Parameter>(element::f32, target_1);
    auto shape_1 = make_shared<op::ShapeOf>(param_1);
    auto convert_1 = make_shared<op::Convert>(shape_1, element::i8);
    auto five = op::Constant::create(element::i8, {1}, {5});
    auto target_shape = std::make_shared<op::Concat>(OutputVector{convert_0, five, convert_1}, 0);
    auto convert = make_shared<op::Convert>(target_shape, element::i64);
    // Feed Broadcast from the converted shape: previously `convert` was
    // created but unused, so the i8 -> i64 round trip was never exercised.
    auto bc = make_shared<op::v1::Broadcast>(param, convert);
    ASSERT_EQ(bc->get_shape(), (Shape{3, 4, 5, 4, 5, 9}));
    const auto& output_shape = bc->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 1000);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[1]), 0);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[2]), 0);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[3]), 0);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[4]), 1500);
    ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[5]), 0);
}

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -16,3 +18,23 @@ TEST(type_prop, convert_deduce) {
ASSERT_EQ(c->get_element_type(), element::i32);
ASSERT_EQ(c->get_shape(), (Shape{2, 3, 4}));
}
// A labeled dimension must keep its label (500) through a chain of two
// Converts (i64 -> i8 -> i64) feeding a Broadcast target shape.
TEST(type_prop, convert_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 500);
PartialShape target_0 = PartialShape{marked_0, 4};
auto param = make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = make_shared<op::ShapeOf>(param_0);
auto convert_0 = make_shared<op::Convert>(shape_0, element::i8);
auto convert_1 = make_shared<op::Convert>(convert_0, element::i64);
auto bc = make_shared<op::v1::Broadcast>(param, convert_1);
ASSERT_EQ(bc->get_shape(), (Shape{3, 4}));
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 500);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[1]), 0);
}

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -110,6 +112,28 @@ TEST(type_prop, gather_v1_negative_axis) {
ASSERT_EQ(gather_v1->get_axis(), 1);
}
// Gather-v1 picking element 0 of a shape must carry the label (10) of the
// corresponding input dimension into the Broadcast output shape.
TEST(type_prop, gather_1_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 10);
PartialShape target_0 = PartialShape{marked_0, 4};
auto param = std::make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = std::make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = std::make_shared<op::ShapeOf>(param_0);
const auto& et = element::i64;
std::vector<int64_t> zero{0};
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{zero.size()}, zero);
const auto axis = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
const auto gather = std::make_shared<op::v1::Gather>(shape_0, indices, axis);
auto bc = std::make_shared<op::v1::Broadcast>(param, gather);
ASSERT_EQ(bc->get_shape(), (Shape{3}));
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
}
// ------------------------------ V7 ------------------------------
TEST(type_prop, gather_7_axis_0) {
@@ -301,6 +325,28 @@ TEST(type_prop, gather_7_axis_not_set_positive_batch_dims) {
ASSERT_EQ(G->get_output_partial_shape(0), out_shape);
}
// Same label-propagation scenario as the v1 test, exercised on Gather-v7.
TEST(type_prop, gather_7_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 10);
PartialShape target_0 = PartialShape{marked_0, 4};
auto param = std::make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = std::make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = std::make_shared<op::ShapeOf>(param_0);
const auto& et = element::i64;
std::vector<int64_t> zero{0};
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{zero.size()}, zero);
const auto axis = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
const auto gather = std::make_shared<op::v7::Gather>(shape_0, indices, axis);
auto bc = std::make_shared<op::v1::Broadcast>(param, gather);
ASSERT_EQ(bc->get_shape(), (Shape{3}));
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
}
// --------------------- V7 Negative tests ------------------------------
TEST(type_prop, gather_7_incorrect_axis_shape) {
@@ -634,6 +680,28 @@ TEST(type_prop, gather_v8_axis_not_set_positive_batch_dims) {
ASSERT_EQ(G->get_output_partial_shape(0), out_shape);
}
// Same label-propagation scenario as the v1 test, exercised on Gather-v8.
TEST(type_prop, gather_8_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 10);
PartialShape target_0 = PartialShape{marked_0, 4};
auto param = std::make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = std::make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = std::make_shared<op::ShapeOf>(param_0);
const auto& et = element::i64;
std::vector<int64_t> zero{0};
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{zero.size()}, zero);
const auto axis = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
const auto gather = std::make_shared<op::v8::Gather>(shape_0, indices, axis);
auto bc = std::make_shared<op::v1::Broadcast>(param, gather);
ASSERT_EQ(bc->get_shape(), (Shape{3}));
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
}
// --------------------- V8 Negative tests ------------------------------
TEST(type_prop, gather_v8_incorrect_axis_shape) {

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -606,3 +608,28 @@ TEST(type_prop, dynamic_shape_propagation_with_i32_precision) {
ASSERT_EQ(reshape->get_element_type(), element::f32);
ASSERT_EQ(reshape->get_output_partial_shape(0), (PartialShape{-1, -1, 1}));
}
TEST(type_prop, reshape_dynamic_value_and_label_propagation) {
    // The label on the first data dimension must survive the
    // ShapeOf -> Gather -> Reshape(-1) -> Broadcast value-propagation chain.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    // Gather shape element 0 as a scalar.
    const std::vector<int64_t> zero_vec{0};
    const auto idx = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto gather_axis = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto first_dim = std::make_shared<op::v7::Gather>(shape_of, idx, gather_axis);

    // Reshape the scalar into a one-element 1D tensor via the special value -1.
    const auto pattern = std::make_shared<op::v0::Constant>(element::i64, Shape{1}, std::vector<int64_t>{-1});
    const auto as_1d = std::make_shared<op::v1::Reshape>(first_dim, pattern, false);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, as_1d);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -101,3 +103,35 @@ TEST(type_prop, shape_of_output_type_v3) {
ASSERT_EQ(so->get_output_element_type(0), element::i32);
ASSERT_EQ(so->get_shape(), Shape{4});
}
TEST(type_prop, shape_of_1_dynamic_value_and_label_propagation) {
    // A label on a data dimension must survive ShapeOf(v0) -> Broadcast
    // value propagation and land on the corresponding output dimension.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, shape_of);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3, 4}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}
TEST(type_prop, shape_of_3_dynamic_value_and_label_propagation) {
    // Same scenario as the v0 variant, but through ShapeOf(v3): the label
    // must still propagate through Broadcast onto output dimension 0.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::v3::ShapeOf>(data);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, shape_of);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3, 4}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}

View File

@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include <numeric>
#include "gtest/gtest.h"
@@ -1148,3 +1149,26 @@ TEST(type_prop, slice_v8_dynamic_rank_inputs) {
EXPECT_EQ(op->get_output_partial_shape(0), dyn_rank_shape);
}
TEST(type_prop, slice_dynamic_value_and_label_propagation) {
    // The label on the first data dimension must survive the
    // ShapeOf -> Slice(v8)[0:1:1] -> Broadcast value-propagation chain.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    // Slice out the first shape element: start 0, stop 1, step 1.
    const std::vector<int64_t> start_val{0}, stop_val{1}, step_val{1};
    const auto start = std::make_shared<op::v0::Constant>(element::i64, Shape{start_val.size()}, start_val);
    const auto stop = std::make_shared<op::v0::Constant>(element::i64, Shape{stop_val.size()}, stop_val);
    const auto step = std::make_shared<op::v0::Constant>(element::i64, Shape{step_val.size()}, step_val);
    const auto first_dim = std::make_shared<op::v8::Slice>(shape_of, start, stop, step);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, first_dim);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -159,3 +161,30 @@ TEST(type_prop, squeeze_scalar_axes) {
ASSERT_EQ(squeeze->get_element_type(), element::f32);
ASSERT_EQ(squeeze->get_shape(), (Shape{4, 1, 4, 1, 8}));
}
TEST(type_prop, squeeze_dynamic_value_and_label_propagation) {
    // The label on the first data dimension must survive a round trip of
    // ShapeOf -> Gather -> Unsqueeze -> Squeeze -> Broadcast value propagation.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    // Gather shape element 0 as a scalar; axis_0 is reused for Squeeze below.
    const std::vector<int64_t> zero_vec{0};
    const auto idx = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto axis_0 = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto first_dim = std::make_shared<op::v7::Gather>(shape_of, idx, axis_0);

    // Expand the scalar to rank 2, then squeeze one axis back out, so the
    // label must survive both Unsqueeze and Squeeze.
    const auto both_axes = std::make_shared<op::v0::Constant>(element::i64, Shape{2}, std::vector<int64_t>{0, 1});
    const auto rank2 = std::make_shared<op::v0::Unsqueeze>(first_dim, both_axes);
    const auto rank1 = std::make_shared<op::v0::Squeeze>(rank2, axis_0);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, rank1);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}

View File

@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include <memory>
#include "gtest/gtest.h"
@@ -162,3 +163,31 @@ TEST(type_prop, strided_slice_reverse_out_of_bounds) {
Shape expected{3, 4, 5};
EXPECT_EQ(ss->get_output_shape(0), expected);
}
TEST(type_prop, strided_slice_dynamic_value_and_label_propagation) {
    // The label on the first data dimension must survive the
    // ShapeOf -> StridedSlice(v1)[0:1:1] -> Broadcast value-propagation chain.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    // Take the first shape element: begin 0, end 1, stride 1, no masks set.
    const std::vector<int64_t> start_val{0}, stop_val{1}, step_val{1};
    const auto start = std::make_shared<op::v0::Constant>(element::i64, Shape{start_val.size()}, start_val);
    const auto stop = std::make_shared<op::v0::Constant>(element::i64, Shape{stop_val.size()}, stop_val);
    const auto step = std::make_shared<op::v0::Constant>(element::i64, Shape{step_val.size()}, step_val);
    const auto first_dim = std::make_shared<op::v1::StridedSlice>(shape_of,
                                                                  start,
                                                                  stop,
                                                                  step,
                                                                  std::vector<int64_t>{0},
                                                                  std::vector<int64_t>{0});

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, first_dim);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}

View File

@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
@@ -67,4 +69,28 @@ TEST(type_prop, unsqueeze_dynamic_axes) {
auto unsqueeze = make_shared<op::v0::Unsqueeze>(param, axes_node);
ASSERT_EQ(unsqueeze->get_element_type(), element::f32);
ASSERT_EQ(unsqueeze->get_output_partial_shape(0), PartialShape::dynamic());
}
}
TEST(type_prop, unsqueeze_dynamic_value_and_label_propagation) {
    // The label on the first data dimension must survive the
    // ShapeOf -> Gather -> Unsqueeze -> Broadcast value-propagation chain.
    Dimension labelled_dim(3);
    ov::DimensionTracker::set_label(labelled_dim, 10);
    const PartialShape data_shape{labelled_dim, 4};

    const auto data = std::make_shared<op::Parameter>(element::f32, data_shape);
    const auto shape_of = std::make_shared<op::ShapeOf>(data);

    // Gather shape element 0 as a scalar; axis_0 doubles as the Unsqueeze axes.
    const std::vector<int64_t> zero_vec{0};
    const auto idx = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto axis_0 = std::make_shared<op::v0::Constant>(element::i64, Shape{}, zero_vec);
    const auto first_dim = std::make_shared<op::v7::Gather>(shape_of, idx, axis_0);
    const auto as_1d = std::make_shared<op::v0::Unsqueeze>(first_dim, axis_0);

    const auto bc_input = std::make_shared<op::Parameter>(element::f32, Shape{1});
    const auto broadcast = std::make_shared<op::v1::Broadcast>(bc_input, as_1d);

    ASSERT_EQ(broadcast->get_shape(), (Shape{3}));
    const auto& out_shape = broadcast->get_output_partial_shape(0);
    ASSERT_EQ(ov::DimensionTracker::get_label(out_shape[0]), 10);
}