Take Jane's changes for Dimension names

Ilya Lavrenov 2021-04-27 16:47:09 +03:00
parent e995a8a216
commit 9f6c8fa5a6
8 changed files with 166 additions and 23 deletions

View File

@@ -21,6 +21,8 @@ namespace ngraph
class HostTensor;
}
using HostTensorPtr = std::shared_ptr<runtime::HostTensor>;
using TensorLabel = std::vector<std::string>;
namespace descriptor
{
/// \brief Compile-time descriptor of a first-class value that is a tensor.
@@ -53,7 +55,9 @@ namespace ngraph
void set_lower_value(const HostTensorPtr& value);
/// \brief sets upper bound value description
void set_upper_value(const HostTensorPtr& value);
/// \brief unsets bound value descriptions
/// \brief sets value label description
void set_value_label(const TensorLabel& value_label);
/// \brief unsets bound value descriptions and their labels
void invalidate_values();
const element::Type& get_element_type() const { return m_element_type; }
@@ -63,6 +67,8 @@ namespace ngraph
HostTensorPtr get_lower_value() const { return m_lower_value; }
/// \brief gets upper bound value description
HostTensorPtr get_upper_value() const { return m_upper_value; }
/// \brief gets value label description
TensorLabel get_value_label() const { return m_value_label; }
/// \brief checks if lower and upper bound are set and point to the same HostTensor
bool has_and_set_bound() const
{
@@ -81,6 +87,7 @@ namespace ngraph
PartialShape m_partial_shape;
Node* m_node{nullptr};
HostTensorPtr m_lower_value, m_upper_value;
std::vector<std::string> m_value_label;
size_t m_node_output_number{0};
std::string m_name;
@@ -89,5 +96,5 @@ namespace ngraph
NGRAPH_API
std::ostream& operator<<(std::ostream&, const ngraph::descriptor::Tensor&);
} // namespace descriptor
} // namespace ngraph
}
}
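For orientation, a minimal caller-side sketch of the new label API (hypothetical usage, not part of this commit; assumes a tensor with static Shape{3}):

#include "ngraph/descriptor/tensor.hpp"

void label_example(ngraph::descriptor::Tensor& tensor) // tensor of static Shape{3}
{
    // One label per element; set_value_label checks that the shape is static
    // and that shape_size matches the label count (see tensor.cpp below).
    ngraph::TensorLabel labels = {"N", "C", "HW"};
    tensor.set_value_label(labels);
    ngraph::TensorLabel read_back = tensor.get_value_label();
    tensor.invalidate_values(); // now also clears the labels
}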

View File

@@ -26,12 +26,12 @@ namespace ngraph
/// \brief Construct a static dimension.
/// \param dimension Value of the dimension.
Dimension(value_type dimension);
Dimension(value_type dimension, std::string name = "");
/// \brief Construct a dynamic dimension with bounded range
/// \param min_dimension The lower inclusive limit for the dimension
/// \param max_dimension The upper inclusive limit for the dimension
Dimension(value_type min_dimension, value_type max_dimension);
Dimension(value_type min_dimension, value_type max_dimension, std::string name = "");
/// \brief Construct a dynamic dimension with range [0, ...]
Dimension() = default;
@@ -61,6 +61,9 @@ namespace ngraph
/// \brief Return the interval of valid lengths
const Interval& get_interval() const { return m_dimension; }
Interval& get_interval() { return m_dimension; }
/// \brief Return the dimension name
const std::string& get_name() const { return m_name; }
/// \brief Check whether this dimension represents the same scheme as the argument (both
/// dynamic, or equal).
/// \param dim The other dimension to compare this dimension to.
@@ -146,13 +149,15 @@ namespace ngraph
Dimension& operator&=(const Dimension& dim);
private:
Dimension(const Interval& interval)
Dimension(const Interval& interval, std::string name = "")
: m_dimension(interval)
, m_name(name)
{
}
// The actual numerical value of the dimension.
Interval m_dimension{};
std::string m_name;
};
/// \brief Insert a human-readable representation of a dimension into an output stream.
@@ -163,4 +168,4 @@ namespace ngraph
/// Inserts the string `?` if `dimension` is dynamic; else inserts `dimension.get_length()`.
NGRAPH_API
std::ostream& operator<<(std::ostream& str, const Dimension& dimension);
} // namespace ngraph
}
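A small illustrative sketch (not from the commit) of constructing named dimensions through the new name parameter, with the output produced by operator<< as defined in dimension.cpp below:

#include <iostream>
#include "ngraph/dimension.hpp"

int main()
{
    ngraph::Dimension batch(-1, "N");      // fully dynamic, named "N"
    ngraph::Dimension height(1, 640, "H"); // bounded range, named "H"
    ngraph::Dimension channels(3);         // static, unnamed

    std::cout << batch << '\n';    // "N:?"  -- name prefix, then the dynamic marker
    std::cout << height << '\n';   // "H:1..640" (range syntax from the elided branch of operator<<)
    std::cout << channels << '\n'; // "3"
}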

View File

@@ -55,6 +55,7 @@ namespace ngraph
using HostTensor = runtime::HostTensor;
using HostTensorPtr = std::shared_ptr<HostTensor>;
using HostTensorVector = std::vector<HostTensorPtr>;
using TensorLabelVector = std::vector<TensorLabel>;
namespace op
{
@@ -199,6 +200,7 @@ namespace ngraph
const HostTensorVector& input_values) const;
virtual bool evaluate_lower(const HostTensorVector& output_values) const;
virtual bool evaluate_upper(const HostTensorVector& output_values) const;
virtual bool evaluate_label(TensorLabelVector& output_labels) const;
virtual bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values);
/// \brief Decomposes the FusedOp into a sub-graph consisting of core ngraph ops
@@ -670,7 +672,7 @@ namespace ngraph
{
}
};
} // namespace ngraph
}
#define NODE_VALIDATION_CHECK(node, ...) \
NGRAPH_CHECK_HELPER(::ngraph::NodeValidationFailure, (node), __VA_ARGS__)
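As a hypothetical illustration (no such op is part of this commit), a data-movement operation could opt into label propagation by overriding the new virtual and delegating to the default_label_evaluator helper declared in validation_util.hpp (next file):

class MyGatherLikeOp : public ngraph::op::Op
{
public:
    // ... type info, constructors, validate_and_infer_types(), evaluate() ...

    bool evaluate_label(ngraph::TensorLabelVector& output_labels) const override
    {
        // Routes the labels of input 0 through this op's evaluate(); only
        // correct for ops that move values without combining them.
        return ngraph::default_label_evaluator(this, output_labels);
    }
};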

View File

@@ -277,6 +277,15 @@ namespace ngraph
/// \return boolean status if value evaluation was successful.
NGRAPH_API bool default_lower_bound_evaluator(const Node* node,
const HostTensorVector& output_values);
/// \brief Propagates the value label from input 0 to the only output through an operation.
/// Not applicable to operations that require interaction between values (example: mathematical
/// operations). Could be used for data-movement operations (example: gather, shape change)
/// \param node Operation to be performed
/// \param output_labels Vector of TensorLabel objects representing resulting value labels
/// \return boolean status if label evaluation was successful.
NGRAPH_API bool default_label_evaluator(const Node* node, TensorLabelVector& output_labels);
/// \brief Estimates both bounds for node output tensors using both bounds of inputs. Works for
/// operations with two inputs (in_1 and in_2). Brute forces all the pairs of bounds for inputs
/// and evaluates all of them: {in_1_lower, in_2_lower}, {in_1_lower, in_2_upper}, {in_1_upper,
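For intuition on the brute-force estimation described above, a worked example with hypothetical bounds: for a subtraction with in_1 in [2, 5] and in_2 in [1, 3], evaluating all four pairs gives {2-1, 2-3, 5-1, 5-3} = {1, -1, 4, 2}, so the derived output bounds are [-1, 4].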

View File

@@ -60,6 +60,7 @@ void descriptor::Tensor::invalidate_values()
{
m_upper_value = nullptr;
m_lower_value = nullptr;
m_value_label.clear();
}
void descriptor::Tensor::set_lower_value(const HostTensorPtr& value)
@@ -78,6 +79,21 @@ void descriptor::Tensor::set_upper_value(const HostTensorPtr& value)
m_upper_value = value;
}
void descriptor::Tensor::set_value_label(const TensorLabel& value_label)
{
const auto& labels_size = value_label.size();
if (labels_size == 0)
{
m_value_label.clear();
}
else
{
NGRAPH_CHECK(m_partial_shape.is_static());
NGRAPH_CHECK(shape_size(m_partial_shape.to_shape()) == labels_size);
m_value_label = value_label;
}
}
const Shape& descriptor::Tensor::get_shape() const
{
if (m_partial_shape.is_static())
@@ -96,6 +112,7 @@ size_t descriptor::Tensor::size() const
const bool bitwidth_less_than_byte = m_element_type.bitwidth() < 8;
if (bitwidth_less_than_byte)
{
// TODO consider caching this value
return ceil((1.0 * shape_size(get_shape()) * m_element_type.bitwidth()) / 8);
}
return shape_size(get_shape()) * m_element_type.size();
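An illustrative note on the new set_value_label precondition (values hypothetical): a tensor of Shape{2, 3} has shape_size 6, so passing five labels trips the NGRAPH_CHECK, while passing an empty TensorLabel is always accepted and simply clears the stored labels.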

View File

@@ -2,10 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <iostream>
#include <limits>
#include <sstream>
#include "ngraph/dimension.hpp"
@@ -13,6 +10,9 @@ using namespace ngraph;
std::ostream& ngraph::operator<<(std::ostream& str, const Dimension& dimension)
{
if (!dimension.get_name().empty())
str << dimension.get_name() << ":";
if (dimension.is_static())
{
return str << dimension.get_length();
@@ -28,29 +28,41 @@ std::ostream& ngraph::operator<<(std::ostream& str, const Dimension& dimension)
}
}
Dimension::Dimension(value_type dimension)
Dimension::Dimension(value_type dimension, std::string name)
: m_dimension(dimension == -1 ? 0 : dimension, dimension == -1 ? Interval::s_max : dimension)
, m_name(name)
{
}
Dimension::Dimension(value_type min_dimension, value_type max_dimension)
Dimension::Dimension(value_type min_dimension, value_type max_dimension, std::string name)
: m_dimension(min_dimension == -1 ? 0 : min_dimension,
max_dimension == -1 ? Interval::s_max : max_dimension)
, m_name(name)
{
}
Dimension Dimension::operator+(const Dimension& dim) const
{
if (dim.m_dimension == 0 && dim.get_name().empty())
return *this;
else if (m_dimension == 0 && get_name().empty())
return dim;
return Dimension(m_dimension + dim.m_dimension);
}
Dimension Dimension::operator-(const Dimension& dim) const
{
if (dim.m_dimension == 0 && dim.get_name().empty())
return *this;
return Dimension(m_dimension - dim.m_dimension);
}
Dimension Dimension::operator*(const Dimension& dim) const
{
if (dim.m_dimension == 1 && dim.get_name().empty())
return *this;
else if (m_dimension == 1 && get_name().empty())
return dim;
return Dimension(m_dimension * dim.m_dimension);
}
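A hypothetical consequence of the name-aware identity checks above:

ngraph::Dimension n(-1, "N"); // dynamic, named
ngraph::Dimension zero(0);    // static zero, unnamed

auto sum = n + zero; // short-circuits and returns n, so sum.get_name() == "N"
// With a *named* zero the short-circuit is skipped and the addition falls
// through to Dimension(m_dimension + dim.m_dimension), which carries no name.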
@@ -93,20 +105,46 @@ bool Dimension::merge(Dimension& dst, const Dimension d1, const Dimension d2)
{
return false;
}
dst = result;
std::string name;
if (d1 == d2 && d1.get_name() == d2.get_name())
name = d1.get_name();
dst = {result, name};
return true;
}
std::string broadcast_dimensions_name(const Dimension& d1, const Dimension& d2)
{
std::string name;
if (d1 == d2)
{
const auto& name_1 = d1.get_name();
const auto& name_2 = d2.get_name();
if (name_1 == name_2 || (!name_1.empty() && name_2.empty()))
name = name_1;
else if (name_1.empty() && !name_2.empty())
name = name_2;
return name;
}
const auto& one_dim = d1 == 1 ? d1 : (d2 == 1 ? d2 : -1);
const auto& other_dim = d1 == 1 ? d2 : (d2 == 1 ? d1 : -1); // it is not equal to 1
if (one_dim.is_dynamic())
return "";
return other_dim.get_name();
}
bool Dimension::broadcast_merge(Dimension& dst, const Dimension d1, const Dimension d2)
{
if (d1.m_dimension.size() == 1 && d1.m_dimension.get_min_val() == 1)
{
dst = d2;
dst =
Dimension(d2.get_min_length(), d2.get_max_length(), broadcast_dimensions_name(d1, d2));
return true;
}
if (d2.m_dimension.size() == 1 && d2.m_dimension.get_min_val() == 1)
{
dst = d1;
dst =
Dimension(d1.get_min_length(), d1.get_max_length(), broadcast_dimensions_name(d1, d2));
return true;
}
return merge(dst, d1, d2);
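To illustrate broadcast_dimensions_name with hypothetical dimensions: N:3 against an unnamed 3 yields "N" (equal dims, only one side named); N:3 against M:3 yields an empty name (conflict); an unnamed static 1 against H:5 yields "H" (the name of the non-1 side survives); and when neither side is a static 1 and the dims are unequal, one_dim resolves to the dynamic -1 sentinel and the name comes back empty.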
@@ -127,7 +165,7 @@ namespace
{
return vt == Interval::s_max ? -1 : vt;
}
} // namespace
}
Dimension::value_type Dimension::get_max_length() const
{

View File

@@ -552,7 +552,7 @@ namespace ngraph
{
ostream& operator<<(ostream& out, const Node& node) { return node.write_description(out, 1); }
ostream& operator<<(ostream& out, const Node* node) { return node->write_description(out, 1); }
} // namespace ngraph
}
std::ostream& Node::write_description(std::ostream& out, uint32_t depth) const
{
@@ -971,6 +971,11 @@ bool Node::evaluate_upper(const HostTensorVector& output_values) const
return default_upper_bound_evaluator(this, output_values);
}
bool Node::evaluate_label(TensorLabelVector& output_labels) const
{
return false;
}
bool Node::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
OV_ITT_SCOPED_TASK(itt::domains::nGraph, "Node::constant_fold");

View File

@@ -485,8 +485,8 @@ PartialShape ngraph::infer_batched_pooling_forward(const Node* node,
{
NODE_VALIDATION_CHECK(node,
data_batch_shape.rank().is_dynamic() ||
(data_batch_shape.rank().get_length() >= 3 &&
data_batch_shape.rank().get_length() <= 5),
data_batch_shape.rank().get_length() >= 3 &&
data_batch_shape.rank().get_length() <= 5,
"Data batch must have rank of at least 4 or 5 (one batch axis, ",
"one input-channel axis, and two or three spatial dimension) ",
"(data batch shape: ",
@@ -1235,7 +1235,7 @@ namespace
}
vector<MaxValue> exec_nop(Node* node, vector<MaxValue>& inputs) { return {inputs.at(0)}; }
} // namespace
}
pair<bool, uint64_t> ngraph::maximum_value(const Output<Node>& value)
{
@@ -1381,6 +1381,7 @@ HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper)
outputs.push_back(std::make_shared<HostTensor>(out));
if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs))
{
TensorLabelVector output_labels;
const auto& input_values = node->input_values();
bool same_inputs = std::all_of(
input_values.begin(), input_values.end(), [](const Output<Node>& input) {
@@ -1388,14 +1389,19 @@ HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper)
});
for (size_t i = 0; i < outputs.size(); ++i)
{
// TODO: should we skip setting value for tensors that have only one consumer?
if ((same_inputs || is_upper) &&
node->get_output_tensor(i).get_upper_value() == nullptr)
node->get_output_tensor(i).set_upper_value(outputs[i]);
if ((same_inputs || !is_upper) &&
node->get_output_tensor(i).get_lower_value() == nullptr)
node->get_output_tensor(i).set_lower_value(outputs[i]);
output_labels.push_back(TensorLabel());
}
if (node->evaluate_label(output_labels))
for (size_t i = 0; i < outputs.size(); ++i)
node->get_output_tensor(i).set_value_label(output_labels[i]);
// invalidation of previously calculated and unused values
for (const auto& input : input_values)
if (input.get_target_inputs().size() == 1)
input.get_tensor().invalidate_values();
@@ -1439,10 +1445,13 @@ bool ngraph::evaluate_as_partial_shape(const Output<Node>& output, PartialShape&
const auto upper_bound = std::make_shared<op::Constant>(ub)->cast_vector<int64_t>();
NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
vector<Dimension> resulting_pshape(lower_bound.size());
const TensorLabel& labels = output.get_tensor().get_value_label();
NGRAPH_CHECK(lower_bound.size() == labels.size() || labels.empty());
for (size_t i = 0; i < lower_bound.size(); ++i)
{
NGRAPH_CHECK(lower_bound[i] >= 0 && upper_bound[i] >= 0);
resulting_pshape[i] = {lower_bound[i], upper_bound[i]};
const auto& name = labels.empty() ? "" : labels[i];
resulting_pshape[i] = {lower_bound[i], upper_bound[i], name};
}
pshape = PartialShape(resulting_pshape);
shape_defined = true;
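As a hypothetical trace of the label plumbing above: if the output tensor carries labels {"N", "", ""} with lower bound {1, 3, 224} and upper bound {8, 3, 224}, the loop reconstructs PartialShape{N:1..8, 3, 224}, so the batch dimension keeps its name through shape inference.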
@@ -1464,6 +1473,57 @@ bool default_bound_evaluator(const Node* node, const HostTensorVector& output_va
return node->evaluate(output_values, input_tensors);
}
bool ngraph::default_label_evaluator(const Node* node, TensorLabelVector& output_labels)
{
NGRAPH_CHECK(node->outputs().size() == 1);
const auto& input_values = node->input_values();
TensorLabel input_labels;
HostTensorVector input_tensors(input_values.size());
for (size_t i = 0; i < input_values.size(); ++i)
{
const auto& input = input_values[i];
if (i != 0)
if (input.get_tensor().has_and_set_bound())
input_tensors[i] = input.get_tensor().get_lower_value();
else
return false;
else
{
input_labels = input.get_tensor().get_value_label();
bool no_labels = std::all_of(input_labels.begin(),
input_labels.end(),
[](const std::string& l) { return l.empty(); });
if (input_labels.empty() || no_labels)
return false;
std::vector<size_t> idxs(input_labels.size());
std::iota(idxs.begin(), idxs.end(), 0);
auto idxs_constant =
op::Constant::create(input.get_element_type(), input.get_shape(), idxs);
auto idxs_htp = std::make_shared<HostTensor>(idxs_constant);
input_tensors[i] = idxs_htp;
}
}
// inputs are finalized
const auto& single_output = node->output(0);
const auto& output = std::make_shared<HostTensor>(single_output.get_element_type(),
single_output.get_partial_shape());
if (!node->evaluate({output}, input_tensors))
return false;
const auto& label_map = std::make_shared<op::Constant>(output)->cast_vector<size_t>();
output_labels[0].resize(label_map.size());
for (size_t i = 0; i < label_map.size(); ++i)
{
const auto& mapped_idx = label_map[i];
output_labels[0][i] = mapped_idx >= input_labels.size() ? "" : input_labels[mapped_idx];
}
return true;
}
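Restating the helper's trick with a hypothetical trace (shapes and indices illustrative only):

// Labels of input 0:           {"N", "C", "H", "W"}  (a 4-element shape tensor)
// Iota substituted for input:  {0, 1, 2, 3}
// Suppose evaluate() gathers indices {3, 2}:
//   label_map (op output)  -> {3, 2}
//   output_labels[0]       -> {input_labels[3], input_labels[2]} = {"W", "H"}
// A mapped index outside input_labels yields an empty label.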
bool ngraph::default_lower_bound_evaluator(const Node* node, const HostTensorVector& output_values)
{
return default_bound_evaluator(node, output_values, false);