nGraph value propagation (#3368)
* partial value and shape propagation
* TopK k propagation
* stylish commit
* style again
* Precalc Folding and broadcasting fix
* style
* Self review
* forgetting the unused values
* deleting unused variables
* Review
* self review
* disaster mistake fixed
* tests corrected
* mod to be inferred while creation
* LPT test fix
* tests fixes
* fix
* tests
* dynamic backend fix
* comments
* disable propagation
This commit is contained in:
parent b97a986048
commit f1ffe4f9fd
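At its core this commit teaches nGraph tensors to carry value estimations: each descriptor::Tensor gains optional lower and upper bound HostTensors, nodes get overridable evaluate_lower/evaluate_upper hooks, and helpers declared in validation_util.hpp (see the hunks below) walk the graph to compute those bounds. A minimal usage sketch, assuming the ShapeOf bound hooks this diff adds; all function names come from declarations in this commit:

#include <ngraph/ngraph.hpp>
#include <ngraph/validation_util.hpp>

using namespace ngraph;

void value_propagation_example()
{
    // Parameter with an interval batch dimension: [1, 8] x 3
    auto data = std::make_shared<op::Parameter>(element::f32,
                                                PartialShape{Dimension(1, 8), 3});
    auto shape = std::make_shared<op::v3::ShapeOf>(data);

    // Bounds of the ShapeOf value: lower {1, 3}, upper {8, 3}
    HostTensorPtr lower = evaluate_lower_bound(shape);
    HostTensorPtr upper = evaluate_upper_bound(shape);

    // When both bounds coincide the value is known exactly and can be
    // folded into a Constant even though no Constant node feeds it.
    if (auto folded = get_constant_from_source(shape))
    {
        // folded->cast_vector<int64_t>() holds the precalculated value
    }
}

The per-op changes below all fall out of this: headers declare the hooks, implementations reuse the default bound evaluators, and scattered as_type_ptr<op::Constant> checks become get_constant_from_source calls.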
@@ -333,6 +333,7 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
     auto params = _ngraph_function->get_parameters();
 
+    bool parameter_replaced = false;
     for (size_t i = 0; i < params.size(); i++) {
         const auto& param = params[i];
         if (inputShapes.find(param->get_friendly_name()) == inputShapes.end())

@@ -341,23 +342,35 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
         auto newParam = std::make_shared<::ngraph::op::Parameter>(param->get_element_type(), shape);
         newParam->set_friendly_name(param->get_friendly_name());
         _ngraph_function->replace_parameter(i, newParam);
+        parameter_replaced = true;
     }
-    _ngraph_function->validate_nodes_and_infer_types();
+    if (parameter_replaced)
+        _ngraph_function->validate_nodes_and_infer_types();
+
+    const auto& results = _ngraph_function->get_results();
+    bool outputs_are_static = all_of(
+        begin(results), end(results),
+        [](const std::shared_ptr<ngraph::Node>& n){ return n->get_output_partial_shape(0).is_static(); });
 
     {
-        auto specialized_ngraph_function = cloneFunction(false);
-        {
-            OV_ITT_SCOPED_TASK(itt::domains::IE, "CNNNetworkNGraphImpl::ConvertToLegacy");
-            ::ngraph::pass::Manager manager;
-            // resolves dynamism by replacing dynamic operation with static version
-            manager.register_pass<::ngraph::pass::ConvertNMS5ToLegacyMatcher>(false);
-            manager.register_pass<::ngraph::pass::ConstantFolding>();
-            // OneHotToLegacy changes output precision
-            manager.register_pass<::ngraph::pass::ConvertOneHotToOneHotIEMatcher>()->detect_output_type(
-                specialized_ngraph_function);
-            manager.run_passes(specialized_ngraph_function);
+        shared_ptr<Function> specialized_ngraph_function = nullptr;
+        if (outputs_are_static) {
+            specialized_ngraph_function = _ngraph_function;
+        } else {
+            specialized_ngraph_function = cloneFunction(false);
+            {
+                OV_ITT_SCOPED_TASK(itt::domains::IE, "CNNNetworkNGraphImpl::ConvertToLegacy");
+                ::ngraph::pass::Manager manager;
+                // resolves dynamism by replacing dynamic operation with static version
+                manager.register_pass<::ngraph::pass::ConvertNMS5ToLegacyMatcher>(false);
+                manager.register_pass<::ngraph::pass::ConstantFolding>();
+                // OneHotToLegacy changes output precision
+                manager.register_pass<::ngraph::pass::ConvertOneHotToOneHotIEMatcher>()->detect_output_type(
+                    specialized_ngraph_function);
+                manager.run_passes(specialized_ngraph_function);
+            }
+            specialized_ngraph_function->validate_nodes_and_infer_types();
         }
-        specialized_ngraph_function->validate_nodes_and_infer_types();
 
 #if 0
         for (const auto &op : specialized_ngraph_function->get_ordered_ops()) {

@@ -46,7 +46,8 @@ ngraph::pass::ConvertOneHotToOneHotIEMatcher::ConvertOneHotToOneHotIEMatcher() {
         // insert Convert layer to cast output to a correct data type defined by the on/off values
         if (on_value_node->get_element_type() != m_output_type) {
             auto convert = std::make_shared<ngraph::opset1::Convert>(one_hot_ie, on_value_node->get_element_type());
-            convert->set_friendly_name(one_hot->get_friendly_name() + "/Convert");
+            convert->set_friendly_name(one_hot->get_friendly_name());
+            one_hot->set_friendly_name(one_hot->get_friendly_name() + "/FloatOutput");
             ngraph::copy_runtime_info(one_hot, {one_hot_ie, convert});
             ngraph::replace_node(m.get_match_root(), convert);
         } else {

@@ -63,6 +63,7 @@ bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::
 
     // This pass must be called first in pipeline
     manager.register_pass<ngraph::pass::InitNodeInfo>();
+    manager.register_pass<ngraph::pass::ConstantFolding>();
    manager.register_pass<ngraph::pass::RemoveFilteringBoxesBySize>(); // Resolves dynamism (replaces NonZero), CF needed
 
     // TODO: move to KMB

@@ -211,7 +211,8 @@ protected:
     }
 
     const auto broadcast = std::make_shared<ngraph::opset5::Broadcast>(broadcastInput, shapeOfNode, ngraph::op::BroadcastType::BIDIRECTIONAL);
 
+    // tests are invalid -- output shape of broadcast gets fully deduced and transformations stop working for this particular graph
+    broadcast->set_output_type(0, broadcast->get_output_element_type(0), ngraph::PartialShape::dynamic(broadcast->get_output_partial_shape(0).rank()));
     auto function = std::make_shared<ngraph::Function>(
         ngraph::NodeVector{broadcast},
         params,

@@ -171,7 +171,7 @@ std::shared_ptr<ngraph::Function> FakeQuantizePrecisionSelectionFunction::getRef
     } else {
         // TODO: potential workaround for the same case:
         // openvino\inference-engine\tests\ngraph_functions\src\low_precision_transformations\concat_function.cpp, line #496
-        // branch1Pooling->set_output_type(0, values.fakeQuantizeOnDataOutPrecision, branch1Pooling->get_output_partial_shape(0));
+        branch1Pooling->set_output_type(0, values.fakeQuantizeOnDataOutPrecision, branch1Pooling->get_output_partial_shape(0));
     }
 }

@@ -28,6 +28,11 @@ namespace ngraph
 {
     class Node;
 
+    namespace runtime
+    {
+        class HostTensor;
+    }
+    using HostTensorPtr = std::shared_ptr<runtime::HostTensor>;
     namespace descriptor
     {
         /// \brief Compile-time descriptor of a first-class value that is a tensor.

@@ -56,9 +61,22 @@ namespace ngraph
            void set_element_type(const element::Type& elemenet_type);
            void set_partial_shape(const PartialShape& partial_shape);
 
+           /// \brief sets lower bound value description
+           void set_lower_value(const HostTensorPtr& value);
+           /// \brief sets upper bound value description
+           void set_upper_value(const HostTensorPtr& value);
+           /// \brief unsets bound value descriptions
+           void invalidate_values();
+
            const element::Type& get_element_type() const { return m_element_type; }
            const Shape& get_shape() const;
            const PartialShape& get_partial_shape() const { return m_partial_shape; }
+           HostTensorPtr get_lower_value() const { return m_lower_value; }
+           HostTensorPtr get_upper_value() const { return m_upper_value; }
+           bool has_and_set_bound() const
+           {
+               return m_upper_value != nullptr && m_upper_value == m_lower_value;
+           }
            size_t size() const;
 
        protected:

@@ -71,6 +89,7 @@ namespace ngraph
            Shape m_shape;
            PartialShape m_partial_shape;
            Node* m_node{nullptr};
+           HostTensorPtr m_lower_value, m_upper_value;
            size_t m_node_output_number{0};
 
            std::string m_name;
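The bound storage added to descriptor::Tensor above is deliberately pointer-based: has_and_set_bound() is true only when the very same HostTensor is installed as both bounds. A sketch of how a caller marks a value as exactly known (the NonZero change later in this diff does precisely this):

#include <ngraph/ngraph.hpp>

// Sketch: install one HostTensor as both bounds; has_and_set_bound()
// then reports true because the two pointers compare equal.
void cache_exact_value(ngraph::Node* node, const ngraph::HostTensorPtr& value)
{
    auto& tensor = node->get_output_tensor(0);
    tensor.set_lower_value(value); // NGRAPH_CHECKs shape/type consistency
    tensor.set_upper_value(value);
}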
@@ -174,6 +174,8 @@ namespace ngraph
        Function(const Function&) = delete;
        Function(const Function&&) = delete;
        Function& operator=(const Function&) = delete;
+       /// \brief Checks all the Parameter nodes are registered in the list of Function parameters
+       void check_all_parameters_registered() const;
 
        static std::atomic<size_t> m_next_instance_id;
        std::string m_name;

@@ -208,6 +208,9 @@ namespace ngraph
        /// \returns true if successful
        virtual bool evaluate(const HostTensorVector& output_values,
                              const HostTensorVector& input_values) const;
+       virtual bool evaluate_lower(const HostTensorVector& output_values) const;
+       virtual bool evaluate_upper(const HostTensorVector& output_values) const;
+
        virtual bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values);
        /// \brief Decomposes the FusedOp into a sub-graph consisting of core ngraph ops
        ///

@@ -233,7 +236,12 @@ namespace ngraph
        /// Sets the number of outputs
        void set_output_size(size_t output_size);
 
-       void revalidate_and_infer_types() { validate_and_infer_types(); }
+       void invalidate_values();
+       void revalidate_and_infer_types()
+       {
+           invalidate_values();
+           validate_and_infer_types();
+       }
        /// \brief Get the string name for the type of the node, such as `Add` or `Multiply`.
        /// The class name, must not contain spaces as it is used for codegen.
        /// \returns A const reference to the node's type name
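Two things to note in the Node interface change above: bound evaluation is opt-in per op via the new virtual hooks, and revalidate_and_infer_types() now clears cached bounds first, so stale values cannot survive a graph edit. For element-wise monotonic ops the per-op overrides in the headers that follow mostly reduce to the default evaluators; a sketch of that reduction (hypothetical free function, not from the commit):

#include <ngraph/ngraph.hpp>
#include <ngraph/validation_util.hpp>

// Sketch: what the overrides below boil down to for monotonic operations.
bool evaluate_bounds_of(const ngraph::Node* node,
                        const ngraph::HostTensorVector& lower_out,
                        const ngraph::HostTensorVector& upper_out)
{
    // Lower bound of the output = op applied to lower bounds of inputs;
    // likewise for the upper bound. Only valid for monotonic ops.
    return ngraph::default_lower_bound_evaluator(node, lower_out) &&
           ngraph::default_upper_bound_evaluator(node, upper_out);
}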
@@ -63,6 +63,8 @@ namespace ngraph
        void set_axis(int64_t axis) { m_axis = axis; }
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& output_values) const override;
+       bool evaluate_upper(const HostTensorVector& output_values) const override;
 
    protected:
        /// \ brief m_axis stores default value for all iterations

@@ -265,6 +265,8 @@ namespace ngraph
 
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
        // Don't constant fold a constant; it would make a copy
        bool constant_fold(OutputVector& outputs, const OutputVector& inputs) override

@@ -56,6 +56,8 @@ namespace ngraph
 
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
    protected:
        ngraph::element::Type m_destination_type;

@@ -49,6 +49,8 @@ namespace ngraph
 
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& inputs_values) override;

@@ -46,6 +46,8 @@ namespace ngraph
 
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
    };
 }
 }

@@ -52,6 +52,8 @@ namespace ngraph
 
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
    };
 }
 }

@@ -66,6 +66,8 @@ namespace ngraph
        void set_special_zero(bool special_zero) { m_special_zero = special_zero; }
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& inputs_values) override;

@@ -55,6 +55,8 @@ namespace ngraph
        bool get_is_foldable() const { return m_is_foldable; }
        bool evaluate(const HostTensorVector& output_values,
                      const HostTensorVector& input_values) const override;
+       bool evaluate_lower(const HostTensorVector& output_values) const override;
+       bool evaluate_upper(const HostTensorVector& output_values) const override;
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& input_values) override;

@@ -91,6 +93,8 @@ namespace ngraph
        bool get_is_foldable() const { return m_is_foldable; }
        bool evaluate(const HostTensorVector& output_values,
                      const HostTensorVector& input_values) const override;
+       bool evaluate_lower(const HostTensorVector& output_values) const override;
+       bool evaluate_upper(const HostTensorVector& output_values) const override;
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& input_values) override;

@@ -44,6 +44,8 @@ namespace ngraph
        virtual void pre_validate_and_infer_types() override;
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& inputs_values) override;

@@ -105,6 +105,8 @@ namespace ngraph
        size_t get_version() const override { return 1; }
        bool evaluate(const HostTensorVector& output_values,
                      const HostTensorVector& input_values) const override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
    private:
        AxisSet convert_mask_to_axis_set(const std::vector<int64_t>& mask) const;

@@ -40,6 +40,9 @@ namespace ngraph
        bool visit_attributes(AttributeVisitor& visitor) override;
        bool evaluate(const HostTensorVector& outputs,
                      const HostTensorVector& inputs) const override;
+       bool evaluate_lower(const HostTensorVector& output_values) const override;
+       bool evaluate_upper(const HostTensorVector& output_values) const override;
+
        bool constant_fold(OutputVector& output_values,
                           const OutputVector& inputs_values) override;

@@ -72,6 +72,8 @@ namespace ngraph
        const AutoBroadcastSpec& get_autob() const override { return m_autob; }
        void set_autob(const AutoBroadcastSpec& autob) { m_autob = autob; }
        bool visit_attributes(AttributeVisitor& visitor) override;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
    private:
        AutoBroadcastSpec m_autob;

@@ -74,14 +74,16 @@ namespace ngraph
        bool evaluate(const HostTensorPtr& arg0,
                      const HostTensorPtr& out,
                      const AxisSet& broadcast_axes) const;
+       bool evaluate_lower(const HostTensorVector& outputs) const override;
+       bool evaluate_upper(const HostTensorVector& outputs) const override;
 
        PartialShape
            get_result_shape_pdpd(const PartialShape& arg0_shape,
-                                 const Shape& target_shape,
+                                 const PartialShape& target_shape,
                                  const op::BroadcastModeSpec& broadcast_spec) const;
 
        void validate_target_shape_numpy(const PartialShape& arg_shape,
-                                        const Shape& target_shape) const;
+                                        const PartialShape& target_shape) const;
 
        static std::pair<bool, AxisSet>
            get_broadcast_axes_numpy_pdpd(const Shape& arg_shape,

@@ -92,9 +94,9 @@ namespace ngraph
            get_broadcast_axes_none(const AxisVector axes_mapping_val,
                                    const size_t target_shape);
 
-       void validate_target_shape_none(const Shape& arg_shape,
+       void validate_target_shape_none(const PartialShape& arg_shape,
                                        const AxisVector& axes_mapping_val,
-                                       const Shape& target_shape) const;
+                                       const PartialShape& target_shape) const;
 
        Shape get_target_shape(const HostTensorPtr& input1) const;
    };

@@ -36,6 +36,9 @@ namespace ngraph
    private:
        void copy_runtime_info_to_target_inputs(const std::shared_ptr<Node>& node,
                                                const Output<Node>& replacement);
+       /// \brief Folds pre-calculated output tensor values to constants in case lower and
+       /// upper estimations are equal. Traverses graph backwards starting from the results.
+       bool pre_calculated_values_folding(const std::shared_ptr<ngraph::Function>& f);
    };
 } // namespace pass
 } // namespace ngraph

@@ -226,6 +226,70 @@ namespace ngraph
                      std::map<RawNodeOutput, HostTensorPtr>& output_tensor_map,
                      const OutputVector& outputs);
 
+   /// \brief Evaluates lower value estimation of the output tensor. Traverses graph up to deduce
+   /// estimation through it.
+   /// \param Node output pointing to the tensor for estimation.
+   /// \return HostTensorPtr to estimated value if can be determined, or nullptr.
+   NGRAPH_API HostTensorPtr evaluate_lower_bound(const Output<Node>& output);
+
+   /// \brief Evaluates lower value estimation of the output tensor. Traverses graph up to deduce
+   /// estimation through it.
+   /// \param output Tensor to be estimated.
+   /// \return HostTensorPtr to estimated value if can be determined, or nullptr.
+   NGRAPH_API HostTensorPtr evaluate_upper_bound(const Output<Node>& output);
+
+   /// \brief Evaluates lower and upper value estimations of the output tensor. Traverses graph up
+   /// to deduce estimation through it.
+   /// \param output Node output pointing to the tensor for estimation.
+   /// \return pair with HostTensorPtrs for lower and upper value estimation. Each object in pair
+   /// could be HostTensorPtr to estimated value if particular bound can be determined, or nullptr.
+   NGRAPH_API std::pair<HostTensorPtr, HostTensorPtr>
+       evaluate_both_bounds(const Output<Node>& output);
+
+   /// \brief Evaluates lower and upper value estimations for the output tensor. Estimation would
+   /// be represented as partial shape object using Dimension(min, max) for each element.
+   /// \param output Node output pointing to the tensor for estimation.
+   /// \param pshape Resulting estimation would be stored in this PartialShape.
+   /// \return boolean status if value evaluation was successful.
+   NGRAPH_API bool evaluate_as_partial_shape(const Output<Node>& output, PartialShape& pshape);
+
+   /// \brief Estimates upper bound for node output tensors using only upper bounds of the nodes
+   /// inputs.
+   /// \param node Operation to be performed
+   /// \param output_values Vector of HostTensorPtrs representing resulting upper value estimations
+   /// \return boolean status if value evaluation was successful.
+   NGRAPH_API bool default_upper_bound_evaluator(const Node* node,
+                                                 const HostTensorVector& output_values);
+   /// \brief Estimates lower bound for node output tensors using only lower bounds of the nodes
+   /// inputs.
+   /// \param node Operation to be performed
+   /// \param output_values Vector of HostTensorPtrs representing resulting lower value estimations
+   /// \return boolean status if value evaluation was successful.
+   NGRAPH_API bool default_lower_bound_evaluator(const Node* node,
+                                                 const HostTensorVector& output_values);
+
+   NGRAPH_API bool interval_bound_evaluator(const Node* node,
+                                            const HostTensorVector& lower_output_values,
+                                            const HostTensorVector& upper_output_values);
+
+   /// \brief Checks if all the elements of the bound HostTensor are positive
+   NGRAPH_API bool host_tensor_is_positive(const HostTensorPtr& bound);
+
+   /// \brief Checks if lower and upper bounds of the corresponding tensor are set (not nullptr)
+   /// and pointers are the same. It doesn't check if lower and upper values are the same relying
+   /// only on pointers comparison.
+   NGRAPH_API bool has_and_set_equal_bounds(const Output<Node>& source);
+
+   /// \brief Runs an estimation of source tensor. If it succeeded to calculate both bounds and
+   /// they are the same returns Constant operation from the resulting bound, otherwise nullptr.
+   NGRAPH_API std::shared_ptr<op::Constant> get_constant_from_source(const Output<Node>& source);
+
+   /// \brief Returns a Constant storing scalar value equal to std::numeric_limits<t>::max()
+   NGRAPH_API std::shared_ptr<op::Constant> get_constant_max_of_type(element::Type_t t);
+
+   /// \brief Returns a Constant storing scalar value equal to std::numeric_limits<t>::min()
+   NGRAPH_API std::shared_ptr<op::Constant> get_constant_min_of_type(element::Type_t t);
+
    namespace opset1
    {
        ///
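Besides per-element bounds, the helpers above can summarize an estimation as a shape: evaluate_as_partial_shape turns the lower/upper tensors of a shape-describing value into Dimension(min, max) entries. A sketch of the pattern, with a hypothetical helper name; this is how ops such as Broadcast can keep interval output shapes instead of collapsing to fully dynamic when the target shape is not a Constant:

#include <ngraph/ngraph.hpp>
#include <ngraph/validation_util.hpp>

// Sketch, assuming input 1 of `op` describes the output shape.
void infer_output_from_bounds(ngraph::Node* op)
{
    ngraph::PartialShape output_shape = ngraph::PartialShape::dynamic();
    if (ngraph::evaluate_as_partial_shape(op->input_value(1), output_shape))
    {
        // each element became Dimension(lower, upper) from the estimations
    }
    op->set_output_type(0, op->get_input_element_type(0), output_shape);
}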
@@ -16,6 +16,7 @@
 
 #include "ngraph/descriptor/tensor.hpp"
 #include "ngraph/node.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
 
 using namespace ngraph;
 using namespace std;

@@ -67,6 +68,26 @@ void descriptor::Tensor::set_partial_shape(const PartialShape& partial_shape)
     }
 }
 
+void descriptor::Tensor::invalidate_values()
+{
+    m_upper_value = nullptr;
+    m_lower_value = nullptr;
+}
+
+void descriptor::Tensor::set_lower_value(const HostTensorPtr& value)
+{
+    NGRAPH_CHECK(m_partial_shape.same_scheme(value->get_partial_shape()));
+    NGRAPH_CHECK(m_element_type == value->get_element_type());
+    m_lower_value = value;
+}
+
+void descriptor::Tensor::set_upper_value(const HostTensorPtr& value)
+{
+    NGRAPH_CHECK(m_partial_shape.same_scheme(value->get_partial_shape()));
+    NGRAPH_CHECK(m_element_type == value->get_element_type());
+    m_upper_value = value;
+}
+
 const Shape& descriptor::Tensor::get_shape() const
 {
     if (m_partial_shape.is_static())

@@ -23,7 +23,6 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/log.hpp"
 #include "ngraph/op/util/op_types.hpp"
 #include "ngraph/util.hpp"
 #include "ngraph/validation_util.hpp"
 
 using namespace std;

@@ -42,7 +41,7 @@ Function::Function(const ResultVector& results,
     , m_unique_name("Function_" + to_string(m_next_instance_id.fetch_add(1)))
     , m_topological_sorter(topological_sort<std::vector<std::shared_ptr<Node>>>)
 {
-    validate_nodes_and_infer_types();
+    check_all_parameters_registered();
 }
 
 Function::Function(const OutputVector& results,

@@ -54,7 +53,7 @@ Function::Function(const OutputVector& results,
     , m_unique_name("Function_" + to_string(m_next_instance_id.fetch_add(1)))
     , m_topological_sorter(topological_sort<std::vector<std::shared_ptr<Node>>>)
 {
-    validate_nodes_and_infer_types();
+    check_all_parameters_registered();
 }
 
 Function::Function(const NodeVector& results,

@@ -66,7 +65,7 @@ Function::Function(const NodeVector& results,
     , m_unique_name("Function_" + to_string(m_next_instance_id.fetch_add(1)))
     , m_topological_sorter(topological_sort<std::vector<std::shared_ptr<Node>>>)
 {
-    validate_nodes_and_infer_types();
+    check_all_parameters_registered();
 }
 
 Function::Function(const std::shared_ptr<Node>& result,

@@ -87,7 +86,7 @@ Function::Function(const ResultVector& results,
     , m_unique_name("Function_" + to_string(m_next_instance_id.fetch_add(1)))
     , m_topological_sorter(topological_sort<std::vector<std::shared_ptr<Node>>>)
 {
-    validate_nodes_and_infer_types();
+    check_all_parameters_registered();
 }
 
 Function::Function(const OutputVector& results,

@@ -98,25 +97,38 @@ Function::Function(const OutputVector& results,
 {
 }
 
+void Function::check_all_parameters_registered() const
+{
+    OV_ITT_SCOPED_TASK(ngraph::itt::domains::nGraphPass_LT,
+                       "Function::check_all_parameters_registered");
+    std::stringstream unregistered_parameters;
+    for (auto& node : get_ordered_ops())
+    {
+        if (op::is_parameter(node) &&
+            std::find(m_parameters.begin(), m_parameters.end(), node) == m_parameters.end())
+            unregistered_parameters << node << std::endl;
+    }
+    if (!unregistered_parameters.str().empty())
+        throw ngraph_error("Function references undeclared parameters: " +
+                           unregistered_parameters.str());
+}
+
 void Function::validate_nodes_and_infer_types() const
 {
     OV_ITT_SCOPED_TASK(ngraph::itt::domains::nGraphPass_LT,
                        "Function::validate_nodes_and_infer_types");
 
+    std::stringstream unregistered_parameters;
     for (auto& node : get_ordered_ops())
     {
         node->revalidate_and_infer_types();
-
-        // If we find a parameter make sure it is in the list of parameters of the function
-        if (op::is_parameter(node))
-        {
-            auto it = std::find(m_parameters.begin(), m_parameters.end(), node);
-            if (it == m_parameters.end())
-            {
-                throw ngraph_error("Function references undeclared parameter");
-            }
-        }
+        if (op::is_parameter(node) &&
+            std::find(m_parameters.begin(), m_parameters.end(), node) == m_parameters.end())
+            unregistered_parameters << node << std::endl;
     }
+    if (!unregistered_parameters.str().empty())
+        throw ngraph_error("Function references undeclared parameters: " +
+                           unregistered_parameters.str());
 }
 
 std::vector<shared_ptr<Node>> Function::get_ordered_ops() const

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include <memory>
+#include <ngraph/validation_util.hpp>
 #include <sstream>
 #include <typeindex>
 #include <typeinfo>

@@ -246,6 +247,12 @@ void Node::set_output_size(size_t n)
     }
 }
 
+void Node::invalidate_values()
+{
+    for (const auto& output : outputs())
+        output.get_tensor().invalidate_values();
+}
+
 void Node::validate_and_infer_types()
 {
 }

@@ -956,6 +963,28 @@ bool Node::evaluate(const HostTensorVector& output_values,
     return false;
 }
 
+bool Node::evaluate_lower(const HostTensorVector& output_values) const
+{
+    const auto& inputs = input_values();
+    bool dyn_inputs = std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
+        return !output.get_tensor().has_and_set_bound();
+    });
+    if (dyn_inputs)
+        return false;
+    return default_lower_bound_evaluator(this, output_values);
+}
+
+bool Node::evaluate_upper(const HostTensorVector& output_values) const
+{
+    const auto& inputs = input_values();
+    bool dyn_inputs = std::any_of(inputs.begin(), inputs.end(), [](const Output<Node>& output) {
+        return !output.get_tensor().has_and_set_bound();
+    });
+    if (dyn_inputs)
+        return false;
+    return default_upper_bound_evaluator(this, output_values);
+}
+
 bool Node::constant_fold(OutputVector& output_values, const OutputVector& input_values)
 {
     OV_ITT_SCOPED_TASK(itt::domains::nGraph, "Node::constant_fold");

@@ -966,22 +995,23 @@ bool Node::constant_fold(OutputVector& output_values, const OutputVector& input_
     }
 
+    // If all the inputs are constants, try to evaluate the outputs
+    bool all_constants =
+        std::all_of(input_values.begin(), input_values.end(), [](const Output<Node>& input) {
+            return as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr());
+        });
+    if (!all_constants)
+        return false;
+
     HostTensorVector input_tensors;
-    for (auto input : input_values)
+    for (const auto& input : input_values)
     {
-        if (auto constant = as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr()))
-        {
-            auto host_tensor = make_shared<runtime::HostTensor>(constant);
-            input_tensors.push_back(host_tensor);
-        }
-        else
-        {
-            return false;
-        }
+        auto host_tensor = make_shared<runtime::HostTensor>(
+            as_type_ptr<op::v0::Constant>(input.get_node_shared_ptr()));
+        input_tensors.push_back(host_tensor);
     }
     HostTensorVector output_tensors;
     OutputVector output_constants;
-    for (auto output : outputs())
+    for (const auto& output : outputs())
     {
         auto tensor =
             make_shared<HostTensor>(output.get_element_type(), output.get_partial_shape());
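From here on, most op-level changes repeat one substitution, so it is worth stating once. A sketch with a placeholder port index:

#include <ngraph/ngraph.hpp>
#include <ngraph/validation_util.hpp>

// Reading guide for the op changes that follow (sketch, not from the commit).
std::shared_ptr<ngraph::op::Constant> fetch(const ngraph::Node* node, size_t port)
{
    // before: only a literal Constant producer was accepted
    // auto c = ngraph::as_type_ptr<ngraph::op::Constant>(
    //     node->input_value(port).get_node_shared_ptr());

    // after: any producer whose value bounds collapse to a single value
    // qualifies, e.g. ShapeOf of a static tensor plus integer arithmetic
    return ngraph::get_constant_from_source(node->input_value(port));
}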
@@ -17,6 +17,7 @@
 #include <cstddef>
 #include <memory>
 #include <ngraph/ops.hpp>
+#include <ngraph/validation_util.hpp>
 #include <numeric>
 #include "itt.hpp"

@@ -77,9 +78,11 @@ void op::v1::BatchToSpace::validate_and_infer_types()
     auto crops_begin = input_value(2);
     auto crops_end = input_value(3);
 
-    if (ngraph::op::is_constant(block.get_node_shared_ptr()) &&
-        ngraph::op::is_constant(crops_begin.get_node_shared_ptr()) &&
-        ngraph::op::is_constant(crops_end.get_node_shared_ptr()) && data_pshape.is_static())
+    auto block_const = get_constant_from_source(block);
+    auto crops_begin_const = get_constant_from_source(crops_begin);
+    auto crops_end_const = get_constant_from_source(crops_end);
+
+    if (block_const && crops_begin_const && crops_end_const && data_pshape.is_static())
     {
         const auto& data_shape = data.get_shape();

@@ -90,14 +93,9 @@ void op::v1::BatchToSpace::validate_and_infer_types()
                           data_shape.size(),
                           ")");
 
-        auto block_val = std::dynamic_pointer_cast<op::Constant>(block.get_node_shared_ptr())
-                             ->cast_vector<int64_t>();
-        auto crops_begin_val =
-            std::dynamic_pointer_cast<op::Constant>(crops_begin.get_node_shared_ptr())
-                ->cast_vector<int64_t>();
-        auto crops_end_val =
-            std::dynamic_pointer_cast<op::Constant>(crops_end.get_node_shared_ptr())
-                ->cast_vector<int64_t>();
+        auto block_val = block_const->cast_vector<int64_t>();
+        auto crops_begin_val = crops_begin_const->cast_vector<int64_t>();
+        auto crops_end_val = crops_end_const->cast_vector<int64_t>();
 
         int64_t block_prod = 1;
         for (long val : block_val)

@@ -21,6 +21,7 @@
 #include "ngraph/op/constant.hpp"
 #include "ngraph/partial_shape.hpp"
 
+#include <ngraph/validation_util.hpp>
 #include <numeric>
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/broadcast.hpp"

@@ -185,8 +186,7 @@ void op::v3::Broadcast::validate_and_infer_types()
     {
         auto arg_shape = get_input_partial_shape(0);
 
-        const auto shape_constant =
-            as_type_ptr<op::v0::Constant>(input_value(1).get_node_shared_ptr());
+        const auto shape_constant = get_constant_from_source(input_value(1));
         if (shape_constant)
         {
             auto target_shape = shape_constant->get_shape_val();

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include <memory>
+#include <ngraph/validation_util.hpp>
 
 #include "itt.hpp"
 #include "ngraph/attribute_visitor.hpp"

@@ -152,3 +153,12 @@ bool op::Concat::evaluate(const HostTensorVector& outputs, const HostTensorVecto
     auto concat_axis = get_axis() < 0 ? get_axis() + inputs[0]->get_shape().size() : get_axis();
     return evaluate_concat(inputs, outputs[0], concat_axis);
 }
+bool op::Concat::evaluate_lower(const HostTensorVector& output_values) const
+{
+    return default_lower_bound_evaluator(this, output_values);
+}
+
+bool op::Concat::evaluate_upper(const HostTensorVector& output_values) const
+{
+    return default_upper_bound_evaluator(this, output_values);
+}

@@ -17,6 +17,7 @@
 #include <cmath>
 #include <cstdio>
 #include <cstring>
+#include <ngraph/validation_util.hpp>
 
 #include "itt.hpp"
 #include "ngraph/log.hpp"

@@ -647,6 +648,15 @@ bool op::v0::Constant::evaluate(const HostTensorVector& outputs,
     return true;
 }
 
+bool op::v0::Constant::evaluate_lower(const HostTensorVector& outputs) const
+{
+    return evaluate(outputs, {});
+}
+bool op::v0::Constant::evaluate_upper(const HostTensorVector& outputs) const
+{
+    return evaluate(outputs, {});
+}
+
 //
 // We have to open up namespace blocks here to work around a problem with gcc:
 //

@@ -15,9 +15,12 @@
 //*****************************************************************************
 
 #include <memory>
+#include <ngraph/validation_util.hpp>
 
 #include "itt.hpp"
 #include "ngraph/op/convert.hpp"
+#include "ngraph/op/equal.hpp"
+#include "ngraph/op/select.hpp"
 #include "ngraph/runtime/reference/convert.hpp"
 
 using namespace std;

@@ -116,6 +119,41 @@ namespace convert
        }
        return rc;
    }
+
+   bool evaluate_bound(const Node* node, const HostTensorVector& output_values, bool is_upper)
+   {
+       const auto& input = node->input_value(0);
+       if (const auto& value = is_upper ? input.get_tensor().get_upper_value()
+                                        : input.get_tensor().get_lower_value())
+       {
+           // constants for dynamic values translation
+           auto input_maximum_value = get_constant_max_of_type(input.get_element_type());
+           auto output_maximum_value =
+               get_constant_max_of_type(output_values[0]->get_element_type());
+           if (input_maximum_value == nullptr || output_maximum_value == nullptr)
+               return false;
+
+           bool status = node->evaluate(output_values, {value});
+
+           if (!status)
+               return status;
+
+           // dynamic values translation
+           auto input_dynamic_mask =
+               std::make_shared<HostTensor>(element::boolean, input.get_shape());
+           status = op::v1::Equal().evaluate(
+               {input_dynamic_mask}, {value, std::make_shared<HostTensor>(input_maximum_value)});
+           if (!status)
+               return status;
+           status = op::v1::Select().evaluate(output_values,
+                                              {input_dynamic_mask,
+                                               std::make_shared<HostTensor>(output_maximum_value),
+                                               output_values[0]});
+           return status;
+       }
+       else
+           return false;
+   }
 }
 bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
                                const HostTensorVector& input_values) const

@@ -123,3 +161,13 @@ bool op::v0::Convert::evaluate(const HostTensorVector& output_values,
     NGRAPH_OP_SCOPE(v0_Convert_evaluate);
     return convert::evaluate_convert(input_values[0], output_values[0]);
 }
+
+bool op::v0::Convert::evaluate_lower(const HostTensorVector& output_values) const
+{
+    return convert::evaluate_bound(this, output_values, false);
+}
+
+bool op::v0::Convert::evaluate_upper(const HostTensorVector& output_values) const
+{
+    return convert::evaluate_bound(this, output_values, true);
+}
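The convert::evaluate_bound helper above deserves a note: elements equal to the input type's maximum act as "unbounded" sentinels, and converting them numerically would overflow, which is why the Equal/Select pair re-stamps them with the output type's maximum. The same translation as a standalone sketch (hypothetical helper, fixed i64-to-i32 case):

#include <cstdint>
#include <limits>
#include <vector>

// Sketch of the sentinel translation: an i64 upper bound {2, INT64_MAX, 4}
// must become {2, INT32_MAX, 4}, not wrap around.
std::vector<int32_t> convert_upper_bound(const std::vector<int64_t>& upper)
{
    std::vector<int32_t> out;
    out.reserve(upper.size());
    for (int64_t v : upper)
        out.push_back(v == std::numeric_limits<int64_t>::max()
                          ? std::numeric_limits<int32_t>::max()
                          : static_cast<int32_t>(v));
    return out;
}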
@@ -221,7 +221,7 @@ bool op::v1::ConvolutionBackpropData::is_dynamic() const
     bool is_dynamic = Node::is_dynamic();
     if (inputs().size() == 3 && !is_dynamic)
     {
-        return !is_type<op::Constant>(input_value(2).get_node());
+        return !has_and_set_equal_bounds(input_value(2));
     }
     return is_dynamic;
 }

@@ -242,7 +242,7 @@ const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const
     bool is_output_shape_present = inputs().size() == 3;
     if (is_output_shape_present)
     {
-        if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
+        if (auto const_op = get_constant_from_source(input_value(2)))
         {
             shape = const_op->get_shape_val();
         }

@@ -16,8 +16,6 @@
 
 #include "ngraph/op/divide.hpp"
 #include "itt.hpp"
-#include "ngraph/op/multiply.hpp"
-#include "ngraph/op/negative.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/divide.hpp"

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include "ngraph/op/embedding_segments_sum.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/opsets/opset3.hpp"

@@ -161,8 +162,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
     if (emb_table_shape.rank().is_static())
     {
         result_shape = emb_table_shape;
-        if (auto num_segments_const =
-                as_type<opset3::Constant>(this->get_input_node_ptr(NUM_SEGMENTS)))
+        if (const auto& num_segments_const = get_constant_from_source(input_value(NUM_SEGMENTS)))
         {
             result_shape[0] = num_segments_const->cast_vector<int64_t>()[0];
         }

@@ -23,7 +23,7 @@
 #include "ngraph/runtime/reference/gather.hpp"
 #include "ngraph/shape.hpp"
 
-#include <limits>
+#include <ngraph/validation_util.hpp>
 
 NGRAPH_SUPPRESS_DEPRECATED_START

@@ -118,8 +118,7 @@ void op::v1::Gather::validate_and_infer_types()
 int64_t op::v1::Gather::get_axis() const
 {
     int64_t axis = AXIS_NOT_SET_VALUE;
-    auto axes_input_node = input_value(AXIS).get_node_shared_ptr();
-    if (auto const_op = as_type_ptr<op::Constant>(axes_input_node))
+    if (const auto& const_op = get_constant_from_source(input_value(AXIS)))
     {
         axis = const_op->cast_vector<int64_t>()[0];
     }

@@ -319,6 +318,22 @@ bool op::v1::Gather::evaluate(const HostTensorVector& outputs, const HostTensorV
     return evaluate_gather(outputs, inputs);
 }
 
+bool op::v1::Gather::evaluate_lower(const HostTensorVector& output_values) const
+{
+    if (!input_value(INDICES).get_tensor().has_and_set_bound() ||
+        !input_value(AXIS).get_tensor().has_and_set_bound())
+        return false;
+    return default_lower_bound_evaluator(this, output_values);
+}
+
+bool op::v1::Gather::evaluate_upper(const HostTensorVector& output_values) const
+{
+    if (!input_value(INDICES).get_tensor().has_and_set_bound() ||
+        !input_value(AXIS).get_tensor().has_and_set_bound())
+        return false;
+    return default_upper_bound_evaluator(this, output_values);
+}
+
 bool op::v1::Gather::constant_fold(OutputVector& output_values, const OutputVector& input_values)
 {
     // try the regular constant folding just for the Gather node
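Unlike element-wise monotonic ops, Gather only propagates bounds when its indices and axis are known exactly, as the guards above show: an interval over indices would make the lower and upper bounds select entirely different elements. The condition, restated as a sketch (hypothetical helper; ports 1 and 2 are Gather's INDICES and AXIS inputs):

#include <ngraph/ngraph.hpp>

// Sketch: Gather's bound evaluation is usable only with exact selectors.
bool gather_bounds_are_usable(const ngraph::Node* gather)
{
    return gather->input_value(1).get_tensor().has_and_set_bound() && // indices
           gather->input_value(2).get_tensor().has_and_set_bound();   // axis
}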
@@ -284,7 +284,7 @@ bool op::v1::GroupConvolutionBackpropData::is_dynamic() const
     bool is_dynamic = Node::is_dynamic();
     if (inputs().size() == 3 && !is_dynamic)
     {
-        return !is_type<op::Constant>(input_value(2).get_node());
+        return !has_and_set_equal_bounds(input_value(2));
     }
     return is_dynamic;
 }

@@ -305,7 +305,7 @@ const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_
     bool is_output_shape_present = inputs().size() == 3;
     if (is_output_shape_present)
     {
-        if (auto const_op = as_type<op::Constant>(input_value(2).get_node()))
+        if (const auto& const_op = get_constant_from_source(input_value(2)))
         {
             shape = const_op->get_shape_val();
         }

@@ -18,6 +18,7 @@
 #include <algorithm>
 #include <cmath>
 #include <cstring>
+#include <ngraph/validation_util.hpp>
 #include <numeric>
 #include "itt.hpp"
 #include "ngraph/op/constant.hpp"

@@ -67,7 +68,7 @@ void op::v0::Interpolate::validate_and_infer_types()
         }
     }
 
-    if (auto const_shape = as_type_ptr<op::v0::Constant>(input_value(1).get_node_shared_ptr()))
+    if (const auto& const_shape = get_constant_from_source(input_value(1)))
     {
         auto out_shape = const_shape->cast_vector<int64_t>();
         size_t i = 0;

@@ -166,8 +167,8 @@ std::vector<int64_t> op::v4::Interpolate::get_axes() const
         return default_value;
     }
 
-    auto axes_node = as_type_ptr<op::v0::Constant>(input_value(3).get_node_shared_ptr());
-    NODE_VALIDATION_CHECK(this, axes_node, "Input 'axes' should be Constant.");
+    auto axes_node = get_constant_from_source(input_value(3));
+    NODE_VALIDATION_CHECK(this, axes_node, "Input 'axes' should be Constant or foldable.");
 
     return axes_node->cast_vector<int64_t>();
 }

@@ -259,7 +260,7 @@ void op::v4::Interpolate::validate_and_infer_types()
     set_output_type(0, get_input_element_type(0), output_shape);
     if (m_attrs.shape_calculation_mode == ShapeCalcMode::scales)
     {
-        if (auto const_scales = as_type_ptr<op::v0::Constant>(input_value(2).get_node_shared_ptr()))
+        if (const auto& const_scales = get_constant_from_source(input_value(2)))
         {
             auto scales = const_scales->cast_vector<float>();
             infer_using_scales(output_shape, axes, scales, padded_input_shape);

@@ -267,7 +268,7 @@ void op::v4::Interpolate::validate_and_infer_types()
     }
     else
     {
-        if (auto const_shape = as_type_ptr<op::v0::Constant>(input_value(1).get_node_shared_ptr()))
+        if (const auto& const_shape = get_constant_from_source(input_value(1)))
         {
             auto sizes = const_shape->cast_vector<int64_t>();
             infer_using_shapes(output_shape, axes, sizes);

@@ -72,8 +72,7 @@ void op::v5::Loop::validate_and_infer_types()
                           loop_condition_rank.compatible(0),
                           "Rank of ExecutionCondition input must be equal to 0 or 1");
     }
-    if (const auto& cond_value = std::dynamic_pointer_cast<const ngraph::opset5::Constant>(
-            loop_execution_condition.get_node_shared_ptr()))
+    if (const auto& cond_value = get_constant_from_source(loop_execution_condition))
     {
         auto val = cond_value->cast_vector<bool>();
         NODE_VALIDATION_CHECK(this,

@@ -101,8 +100,7 @@ void op::v5::Loop::validate_and_infer_types()
                           body_condition_rank.compatible(1),
                           "Rank of BodyExecutionCondition output must be equal to 0 or 1");
     }
-    if (const auto& cond_value = std::dynamic_pointer_cast<const ngraph::opset5::Constant>(
-            body_execution_condition.get_node_shared_ptr()))
+    if (const auto& cond_value = get_constant_from_source(body_execution_condition))
     {
         auto val = cond_value->cast_vector<bool>();
         NODE_VALIDATION_CHECK(this,

@@ -127,8 +125,7 @@ void op::v5::Loop::validate_and_infer_types()
         if (m_body->get_parameters().at(desc->m_body_parameter_index) == cond_param)
         {
             if (const auto& cond_value =
-                    std::dynamic_pointer_cast<const ngraph::opset5::Constant>(
-                        input_value(desc->m_input_index).get_node_shared_ptr()))
+                    get_constant_from_source(input_value(desc->m_input_index)))
             {
                 auto val = cond_value->cast_vector<bool>();
                 NODE_VALIDATION_CHECK(

@@ -157,8 +154,7 @@ void op::v5::Loop::validate_and_infer_types()
                           trip_count_rank.compatible(1) || trip_count_rank.compatible(0),
                           "Rank of TripCount input must be equal to 0 or 1");
     }
-    if (const auto& trip_count_val = std::dynamic_pointer_cast<const ngraph::opset5::Constant>(
-            trip_count.get_node_shared_ptr()))
+    if (const auto& trip_count_val = get_constant_from_source(trip_count))
     {
         auto val = trip_count_val->cast_vector<int64_t>();
         NODE_VALIDATION_CHECK(this,

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include "ngraph/op/lrn.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/op/constant.hpp"

@@ -50,10 +51,8 @@ AxisSet op::LRN::get_reduction_axes() const
 {
     AxisSet axes{1}; // channel axis as default
     auto axes_input_node = input_value(1).get_node_shared_ptr();
-    if (auto const_op = as_type_ptr<op::Constant>(axes_input_node))
-    {
+    if (const auto& const_op = get_constant_from_source(axes_input_node))
         axes = const_op->get_axis_set_val();
-    }
     return axes;
 }

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include "ngraph/op/min.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/runtime/host_tensor.hpp"

@@ -83,3 +84,17 @@ bool op::v1::ReduceMin::evaluate(const HostTensorVector& outputs,
     NGRAPH_OP_SCOPE(v1_ReduceMin_evaluate);
     return minop::evaluate_min(inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
 }
+
+bool op::v1::ReduceMin::evaluate_lower(const HostTensorVector& output_values) const
+{
+    if (!input_value(1).get_tensor().has_and_set_bound())
+        return false;
+    return default_lower_bound_evaluator(this, output_values);
+}
+
+bool op::v1::ReduceMin::evaluate_upper(const HostTensorVector& output_values) const
+{
+    if (!input_value(1).get_tensor().has_and_set_bound())
+        return false;
+    return default_upper_bound_evaluator(this, output_values);
+}

@@ -43,6 +43,7 @@ op::v1::Mod::Mod(const Output<Node>& A,
     : FusedOp({A, B})
     , m_auto_broadcast(auto_broadcast)
 {
+    constructor_validate_and_infer_types();
 }
 
 bool ngraph::op::v1::Mod::visit_attributes(AttributeVisitor& visitor)

@@ -16,6 +16,7 @@
 
 #include "ngraph/op/non_max_suppression.hpp"
 #include <cstring>
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/op/constant.hpp"

@@ -178,12 +179,12 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
                           "The last dimension of the 'boxes' input must be equal to 4. Got:",
                           boxes_ps[2]);
 
-    const auto max_output_boxes_per_class = input_value(2).get_node_shared_ptr();
-    if (num_boxes_boxes.is_static() && scores_ps[1].is_static() &&
-        op::is_constant(max_output_boxes_per_class))
+    const auto& max_output_boxes_input = get_constant_from_source(input_value(2));
+    if (num_boxes_boxes.is_static() && scores_ps[1].is_static() && max_output_boxes_input)
     {
         const auto num_boxes = num_boxes_boxes.get_length();
-        const auto max_output_boxes_per_class = max_boxes_output_from_input();
+        const auto max_output_boxes_per_class =
+            max_output_boxes_input->cast_vector<int64_t>().at(0);
         const auto num_classes = scores_ps[1].get_length();
 
         out_shape[0] = std::min(num_boxes, max_output_boxes_per_class * num_classes);

@@ -195,8 +196,7 @@ int64_t op::v1::NonMaxSuppression::max_boxes_output_from_input() const
 {
     int64_t max_output_boxes{0};
 
-    const auto max_output_boxes_input =
-        as_type_ptr<op::Constant>(input_value(2).get_node_shared_ptr());
+    const auto max_output_boxes_input = get_constant_from_source(input_value(2));
     max_output_boxes = max_output_boxes_input->cast_vector<int64_t>().at(0);
 
     return max_output_boxes;

@@ -395,13 +395,13 @@ void op::v3::NonMaxSuppression::validate_and_infer_types()
     if (boxes_ps.rank().is_static() && scores_ps.rank().is_static())
     {
         const auto num_boxes_boxes = boxes_ps[1];
-        const auto max_output_boxes_per_class_node = input_value(2).get_node_shared_ptr();
-        if (num_boxes_boxes.is_static() && scores_ps[1].is_static() &&
-            op::is_constant(max_output_boxes_per_class_node))
+        const auto max_output_boxes_input = get_constant_from_source(input_value(2));
+        if (num_boxes_boxes.is_static() && scores_ps[1].is_static() && max_output_boxes_input)
        {
            const auto num_boxes = num_boxes_boxes.get_length();
            const auto num_classes = scores_ps[1].get_length();
-           const auto max_output_boxes_per_class = max_boxes_output_from_input();
+           const auto max_output_boxes_per_class =
+               max_output_boxes_input->cast_vector<int64_t>().at(0);
 
            out_shape[0] = std::min(num_boxes, max_output_boxes_per_class * num_classes);
        }

@@ -413,8 +413,7 @@ int64_t op::v3::NonMaxSuppression::max_boxes_output_from_input() const
 {
     int64_t max_output_boxes{0};
 
-    const auto max_output_boxes_input =
-        as_type_ptr<op::Constant>(input_value(2).get_node_shared_ptr());
+    const auto max_output_boxes_input = get_constant_from_source(input_value(2));
     max_output_boxes = max_output_boxes_input->cast_vector<int64_t>().at(0);
 
     return max_output_boxes;

@@ -530,13 +529,14 @@ void op::v4::NonMaxSuppression::validate_and_infer_types()
     if (boxes_ps.rank().is_static() && scores_ps.rank().is_static())
     {
         const auto num_boxes_boxes = boxes_ps[1];
-        const auto max_output_boxes_per_class_node = input_value(2).get_node_shared_ptr();
+        const auto max_output_boxes_input = get_constant_from_source(input_value(2));
         if (num_boxes_boxes.is_static() && scores_ps[0].is_static() && scores_ps[1].is_static() &&
-            op::is_constant(max_output_boxes_per_class_node))
+            max_output_boxes_input)
        {
            const auto num_boxes = num_boxes_boxes.get_length();
            const auto num_classes = scores_ps[1].get_length();
-           const auto max_output_boxes_per_class = max_boxes_output_from_input();
+           const auto max_output_boxes_per_class =
+               max_output_boxes_input->cast_vector<int64_t>().at(0);
 
            out_shape[0] = std::min(num_boxes, max_output_boxes_per_class) * num_classes *
                           scores_ps[0].get_length();

@@ -838,7 +838,7 @@ int64_t op::v5::NonMaxSuppression::max_boxes_output_from_input() const
     }
 
     const auto max_output_boxes_input =
-        as_type_ptr<op::Constant>(input_value(max_output_boxes_port).get_node_shared_ptr());
+        get_constant_from_source(input_value(max_output_boxes_port));
     max_output_boxes = max_output_boxes_input->cast_vector<int64_t>().at(0);
 
     return max_output_boxes;

@@ -853,8 +853,7 @@ float op::v5::NonMaxSuppression::iou_threshold_from_input() const
         return iou_threshold;
     }
 
-    const auto iou_threshold_input =
-        as_type_ptr<op::Constant>(input_value(iou_threshold_port).get_node_shared_ptr());
+    const auto iou_threshold_input = get_constant_from_source(input_value(iou_threshold_port));
     iou_threshold = iou_threshold_input->cast_vector<float>().at(0);
 
     return iou_threshold;

@@ -869,8 +868,7 @@ float op::v5::NonMaxSuppression::score_threshold_from_input() const
         return score_threshold;
     }
 
-    const auto score_threshold_input =
-        as_type_ptr<op::Constant>(input_value(score_threshold_port).get_node_shared_ptr());
+    const auto score_threshold_input = get_constant_from_source(input_value(score_threshold_port));
     score_threshold = score_threshold_input->cast_vector<float>().at(0);
 
     return score_threshold;

@@ -885,8 +883,7 @@ float op::v5::NonMaxSuppression::soft_nms_sigma_from_input() const
         return soft_nms_sigma;
     }
 
-    const auto soft_nms_sigma_input =
-        as_type_ptr<op::Constant>(input_value(soft_nms_sigma_port).get_node_shared_ptr());
+    const auto soft_nms_sigma_input = get_constant_from_source(input_value(soft_nms_sigma_port));
     soft_nms_sigma = soft_nms_sigma_input->cast_vector<float>().at(0);
 
     return soft_nms_sigma;

@@ -927,9 +924,8 @@ void op::v5::NonMaxSuppression::validate_and_infer_types()
     if (boxes_ps.rank().is_static() && scores_ps.rank().is_static() && get_input_size() > 2)
     {
         const auto num_boxes_boxes = boxes_ps[1];
-        const auto max_output_boxes_per_class_node = input_value(2).get_node_shared_ptr();
         if (num_boxes_boxes.is_static() && scores_ps[0].is_static() && scores_ps[1].is_static() &&
-            op::is_constant(max_output_boxes_per_class_node))
+            has_and_set_equal_bounds(input_value(2)))
        {
            const auto num_boxes = num_boxes_boxes.get_length();
            const auto num_classes = scores_ps[1].get_length();

@@ -15,6 +15,7 @@
 //*****************************************************************************
 
 #include "ngraph/op/non_zero.hpp"
+#include <ngraph/validation_util.hpp>
 #include "itt.hpp"
 #include "ngraph/op/op.hpp"
 #include "ngraph/runtime/host_tensor.hpp"

@@ -78,6 +79,17 @@ void op::v3::NonZero::validate_and_infer_types()
     }
 
     set_input_is_relevant_to_shape(0);
 
+    if (const auto& input_constant = get_constant_from_source(input_value(0)))
+    { // input_value is available to calculate output shape
+        const auto& input_data = std::make_shared<HostTensor>(input_constant);
+        auto output = std::make_shared<HostTensor>(m_output_type, get_output_partial_shape(0));
+        if (!evaluate({output}, {input_data}))
+            return;
+        set_output_type(0, m_output_type, output->get_partial_shape());
+        get_output_tensor(0).set_lower_value(output);
+        get_output_tensor(0).set_upper_value(output);
+    }
 }
 
 shared_ptr<Node> op::v3::NonZero::clone_with_new_inputs(const OutputVector& new_args) const
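The NonZero change above is the other half of the story: for constant input the op evaluates itself during shape inference and installs the result as both bounds, so consumers can be folded without a dedicated evaluation pass. A sketch of the observable effect, assuming the behavior added above:

#include <ngraph/ngraph.hpp>
#include <ngraph/validation_util.hpp>

// Sketch: NonZero over constant data folds via the cached bounds.
void fold_nonzero_example()
{
    using namespace ngraph;
    auto data = op::Constant::create(element::i32, Shape{4}, {0, 3, 0, 5});
    auto non_zero = std::make_shared<op::v3::NonZero>(data);
    // validate_and_infer_types() ran in the constructor and cached the exact
    // value as both bounds, so this yields a Constant with indices {1, 3}:
    auto folded = get_constant_from_source(non_zero);
}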
@ -15,6 +15,7 @@
//*****************************************************************************
#include <algorithm>
#include <iterator>
#include <ngraph/validation_util.hpp>
#include "itt.hpp"

#include "ngraph/attribute_visitor.hpp"
@ -67,7 +68,8 @@ void op::NormalizeL2::pre_validate_and_infer_types()
    const auto& input_rank = input_pshape.rank();
    const auto& axes_rank = axes_pshape.rank();

    NODE_VALIDATION_CHECK(this, op::is_constant(axes_node), "Input axes must be Constant type");
    NODE_VALIDATION_CHECK(
        this, has_and_set_equal_bounds(input_value(1)), "Input axes must be Constant type");

    if (axes_rank.is_static())
    {
@ -99,8 +101,7 @@ void op::NormalizeL2::pre_validate_and_infer_types()
AxisSet op::NormalizeL2::get_reduction_axes() const
{
    AxisSet axes;
    auto axes_input_node = input_value(1).get_node_shared_ptr();
    if (auto const_op = as_type_ptr<op::Constant>(axes_input_node))
    if (auto const_op = get_constant_from_source(input_value(1)))
    {
        axes = const_op->get_axis_set_val();
    }

@ -74,10 +74,10 @@ void op::v1::OneHot::validate_and_infer_types()
        off_value_shape.is_dynamic() || is_scalar(off_value_shape.to_shape()),
        "off_value input must be scalar.");

    const auto& depth = input_value(1).get_node_shared_ptr();
    PartialShape result_shape{PartialShape::dynamic()};

    if (indices_shape.is_static() && indices_shape.rank().is_static() && op::is_constant(depth))
    const auto& depth = input_value(1).get_node_shared_ptr();
    const auto& depth_constant = get_constant_from_source(input_value(1));
    if (indices_shape.is_static() && indices_shape.rank().is_static() && depth_constant)
    {
        const auto indices_rank = indices_shape.rank().get_length();

@ -103,9 +103,7 @@ void op::v1::OneHot::validate_and_infer_types()
        depth->get_shape(),
        " elements).");

        const auto depth_constant = as_type_ptr<op::Constant>(depth);
        int64_t depth_val = depth_constant->cast_vector<int64_t>()[0];

        NODE_VALIDATION_CHECK(this,
                              depth_val > 0,
                              "The value of 'depth' must be a positive number.",

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/pad.hpp"
#include <ngraph/validation_util.hpp>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/except.hpp"
@ -53,9 +54,8 @@ op::v1::Pad::Pad(const Output<Node>& arg,

CoordinateDiff op::v1::Pad::get_pads_begin() const
{
    auto pads_begin_node = input_value(1).get_node_shared_ptr();
    CoordinateDiff pads_begin_coord{};
    if (auto pads_begin_const = as_type_ptr<op::Constant>(pads_begin_node))
    if (auto pads_begin_const = get_constant_from_source(input_value(1)))
    {
        pads_begin_coord = pads_begin_const->cast_vector<ptrdiff_t>();
    }
@ -64,9 +64,8 @@ CoordinateDiff op::v1::Pad::get_pads_begin() const

CoordinateDiff op::v1::Pad::get_pads_end() const
{
    auto pads_end_node = input_value(2).get_node_shared_ptr();
    CoordinateDiff pads_end_coord{};
    if (auto pads_end_const = as_type_ptr<op::Constant>(pads_end_node))
    if (auto pads_end_const = get_constant_from_source(input_value(2)))
    {
        pads_end_coord = pads_end_const->cast_vector<ptrdiff_t>();
    }
@ -161,10 +160,7 @@ void op::v1::Pad::validate_and_infer_types()
    const auto& pads_begin_coord = get_pads_begin();
    const auto& pads_end_coord = get_pads_end();

    auto pads_begin_node = input_value(1).get_node_shared_ptr();
    auto pads_end_node = input_value(2).get_node_shared_ptr();
    if (arg_shape_rank.is_static() && op::is_constant(pads_begin_node) &&
        op::is_constant(pads_end_node))
    if (arg_shape_rank.is_static() && !pads_begin_coord.empty() && !pads_end_coord.empty())
    {
        const auto implied_rank = pads_begin_coord.size();
        std::vector<Dimension> result_dims(implied_rank, Dimension::dynamic());

@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************

#include <ngraph/validation_util.hpp>
#include "itt.hpp"

#include "ngraph/op/constant.hpp"
@ -63,7 +64,7 @@ void op::PriorBox::validate_and_infer_types()

    set_input_is_relevant_to_shape(0);

    if (auto const_shape = as_type_ptr<op::Constant>(input_value(0).get_node_shared_ptr()))
    if (auto const_shape = get_constant_from_source(input_value(0)))
    {
        NODE_VALIDATION_CHECK(this,
                              shape_size(const_shape->get_shape()) == 2,

@ -14,6 +14,7 @@
// limitations under the License.
//*****************************************************************************

#include <ngraph/validation_util.hpp>
#include "itt.hpp"

#include "ngraph/op/constant.hpp"
@ -70,7 +71,7 @@ void op::PriorBoxClustered::validate_and_infer_types()

    set_input_is_relevant_to_shape(0);

    if (auto const_shape = as_type_ptr<op::Constant>(input_value(0).get_node_shared_ptr()))
    if (auto const_shape = get_constant_from_source(input_value(0).get_node_shared_ptr()))
    {
        NODE_VALIDATION_CHECK(this,
                              shape_size(const_shape->get_shape()) == 2,

@ -15,6 +15,7 @@
//*****************************************************************************

#include <algorithm>
#include <ngraph/validation_util.hpp>

#include "itt.hpp"
#include "ngraph/op/constant.hpp"
@ -110,9 +111,9 @@ void op::v4::Range::validate_and_infer_types()
        "'step' input scalar should be a numeric type. Got: ",
        get_input_element_type(2));

    auto const_start = as_type_ptr<op::Constant>(this->input_value(0).get_node_shared_ptr());
    auto const_stop = as_type_ptr<op::Constant>(this->input_value(1).get_node_shared_ptr());
    auto const_step = as_type_ptr<op::Constant>(this->input_value(2).get_node_shared_ptr());
    auto const_start = get_constant_from_source(input_value(0));
    auto const_stop = get_constant_from_source(input_value(1));
    auto const_step = get_constant_from_source(input_value(2));

    double start = 0;
    double stop = 0;
@ -360,9 +361,9 @@ static
template <typename T>
static PartialShape infer_output_shape(const op::v0::Range* node, const element::Type& /* et */)
{
    auto const_start = as_type_ptr<op::Constant>(node->input_value(0).get_node_shared_ptr());
    auto const_stop = as_type_ptr<op::Constant>(node->input_value(1).get_node_shared_ptr());
    auto const_step = as_type_ptr<op::Constant>(node->input_value(2).get_node_shared_ptr());
    auto const_start = get_constant_from_source(node->input_value(0));
    auto const_stop = get_constant_from_source(node->input_value(1));
    auto const_step = get_constant_from_source(node->input_value(2));

    T start = static_cast<T>(0);
    T stop = static_cast<T>(0);

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/reduce_prod.hpp"
#include <ngraph/validation_util.hpp>
#include "itt.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/runtime/host_tensor.hpp"
@ -87,3 +88,25 @@ bool op::v1::ReduceProd::evaluate(const HostTensorVector& outputs,
    return reduce_prod::evaluate_product(
        inputs[0], outputs[0], get_reduction_axes(), get_keep_dims());
}

bool op::v1::ReduceProd::evaluate_lower(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    HostTensorPtr lb = input_value(0).get_tensor().get_lower_value(),
                  ub = input_value(0).get_tensor().get_upper_value();
    if (!lb || !ub || !host_tensor_is_positive(lb) || !host_tensor_is_positive(ub))
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::v1::ReduceProd::evaluate_upper(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    HostTensorPtr lb = input_value(0).get_tensor().get_lower_value(),
                  ub = input_value(0).get_tensor().get_upper_value();
    if (!lb || !ub || !host_tensor_is_positive(lb) || !host_tensor_is_positive(ub))
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

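ReduceProd only exposes bounds when both input bounds are nonnegative (the host_tensor_is_positive guard above), because a product is not monotone once an interval crosses zero: for x in [-2, 1], x*x lies in [0, 4], yet evaluating the product at the lower bounds gives (-2)*(-2) = 4, which is not a valid lower bound, and at the upper bounds gives 1*1 = 1, which is not a valid upper bound. With nonnegative inputs the product is monotone in every factor, so the default element-wise bound evaluators apply.
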
@ -16,6 +16,7 @@

#include <algorithm>
#include <iostream>
#include <ngraph/validation_util.hpp>

#include "itt.hpp"
#include "ngraph/function.hpp"
@ -105,7 +106,8 @@ namespace reshapeop
    if (input_pshape.rank().is_static())
        for (size_t i = 0; i < input_pshape.rank().get_length(); ++i)
        {
            if (i < reshape_pattern.size() && reshape_pattern[i] == 0)
            if (i < reshape_pattern.size() && reshape_pattern[i].get_min_length() == 0 &&
                reshape_pattern[i].get_max_length() == 0)
                continue;
            input_product *= input_pshape[i];
        }
@ -240,20 +242,26 @@ void op::v1::Reshape::validate_and_infer_types()
    std::vector<Dimension> reshape_pattern;
    int64_t minus_one_idx = -1;

    if (const auto constant = as_type_ptr<op::Constant>(get_input_node_shared_ptr(1)))
    HostTensorPtr lb, ub;
    std::tie(lb, ub) = evaluate_both_bounds(get_input_source_output(1));
    if (lb && ub)
    {
        const auto pattern_vector = constant->cast_vector<int64_t>();
        for (size_t i = 0; i < pattern_vector.size(); ++i)
        const auto lower_bound = std::make_shared<op::Constant>(lb)->cast_vector<int64_t>();
        const auto upper_bound = std::make_shared<op::Constant>(ub)->cast_vector<int64_t>();
        NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
        for (size_t i = 0; i < lower_bound.size(); ++i)
        {
            NODE_VALIDATION_CHECK(this, pattern_vector[i] >= -1, "Dim size cannot be less than -1");
            NODE_VALIDATION_CHECK(this,
                                  lower_bound[i] >= -1 && upper_bound[i] >= -1,
                                  "Dim size cannot be less than -1");

            if (pattern_vector[i] == -1)
            if (lower_bound[i] == -1 && upper_bound[i] == -1)
            { // ctor of Dimension(-1) would turn input Dimension(0, max_int)
                NODE_VALIDATION_CHECK(
                    this, minus_one_idx == -1, "More than one dimension has size of -1");
                minus_one_idx = static_cast<int64_t>(i);
            }
            reshape_pattern.emplace_back(pattern_vector[i]);
            reshape_pattern.emplace_back(lower_bound[i], upper_bound[i]);
        }
    }

@ -332,6 +340,20 @@ bool op::v1::Reshape::evaluate(const HostTensorVector& outputs,
    return evaluate_reshape(outputs, inputs);
}

bool op::v1::Reshape::evaluate_lower(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::v1::Reshape::evaluate_upper(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVector& inputs_values)
{
    if (get_output_partial_shape(0).is_dynamic())

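Reshape now reads its pattern through evaluate_both_bounds instead of requiring a literal Constant, so each pattern element becomes an interval dimension. A sketch with hypothetical bounds: if propagation proves the pattern lies between {1, 3, 224} and {1, 5, 224}, the loop above effectively builds

    // reshape_pattern = {Dimension(1), Dimension(3, 5), Dimension(224)}
    std::vector<ngraph::Dimension> reshape_pattern;
    reshape_pattern.emplace_back(1, 1);
    reshape_pattern.emplace_back(3, 5);
    reshape_pattern.emplace_back(224, 224);

The -1 marker (infer this dimension) is honored only when both bounds agree on it, since constructing Dimension(-1, upper) would silently decay into a (0, max_int) interval, as the inline comment notes.
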
@ -16,6 +16,7 @@

#include <algorithm>
#include <iterator>
#include <ngraph/validation_util.hpp>
#include <sstream>
#include "itt.hpp"

@ -99,12 +100,9 @@ void op::v1::Reverse::validate_and_infer_types()
    if (input_rank.is_static())
    {
        const auto rank = input_rank.get_length();
        const auto rev_axes_node = input_value(1).get_node_shared_ptr();

        if (op::is_constant(rev_axes_node))
        if (const auto& rev_axes_constant = get_constant_from_source(input_value(1)))
        {
            const auto rev_axes_constant = as_type_ptr<op::Constant>(rev_axes_node);

            if (m_mode == Mode::INDEX)
            {
                const AxisSet rev_axes = rev_axes_constant->get_axis_set_val();

@ -95,9 +95,16 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types()
        " and: ",
        updates_shape);

    if (ngraph::op::is_constant(input_value(3).get_node()) && data_shape.rank().is_static())
    set_output_size(1);
    set_output_type(0, data_et, data_shape);

    if (data_shape.is_dynamic())
        set_input_is_relevant_to_shape(0);
    if (data_shape.rank().is_dynamic())
        return;

    if (const auto& axis_input = get_constant_from_source(input_value(3)))
    {
        const auto axis_input = as_type_ptr<op::v0::Constant>(input_value(3).get_node_shared_ptr());
        auto axis = axis_input->cast_vector<int64_t>().at(0);

        int64_t data_rank_length = data_shape.rank().get_length();
@ -114,14 +121,6 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types()
            "]. Got axis value: ",
            axis);
    }

    if (data_shape.is_dynamic())
    {
        set_input_is_relevant_to_shape(0);
    }

    set_output_size(1);
    set_output_type(0, data_et, data_shape);
}

shared_ptr<Node>

@ -146,6 +146,7 @@ namespace detail
    NGRAPH_TYPE_CASE(evaluate_select, u32, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, u64, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, bf16, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, f16, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, f32, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, f64, output_values, input_values, autob);
    NGRAPH_TYPE_CASE(evaluate_select, boolean, output_values, input_values, autob);
@ -161,5 +162,6 @@ bool op::v1::Select::evaluate(const HostTensorVector& output_values,
{
    NGRAPH_OP_SCOPE(v1_Select_evaluate);
    const auto autob = get_auto_broadcast();
    return detail::evaluate_select(output_values, input_values, autob, get_output_element_type(0));
    return detail::evaluate_select(
        output_values, input_values, autob, output_values[0]->get_element_type());
}

@ -21,8 +21,8 @@
#include "ngraph/op/concat.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/select.hpp"
#include "ngraph/op/shape_of.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/shape_of.hpp"
#include "ngraph/type/element_type_traits.hpp"
@ -46,7 +46,8 @@ void op::v3::ShapeOf::validate_and_infer_types()
        m_output_type == element::i64 || m_output_type == element::i32,
        "Output type must be i32 or i64");
    set_input_is_relevant_to_value(0, false);
    set_output_type(0, m_output_type, PartialShape{get_input_partial_shape(0).rank()});
    const auto input_partial_shape = get_input_partial_shape(0);
    set_output_type(0, m_output_type, PartialShape{input_partial_shape.rank()});
}

bool ngraph::op::v3::ShapeOf::visit_attributes(AttributeVisitor& visitor)
@ -152,6 +153,73 @@ namespace shape_of
        }
        return false;
    }

    bool evaluate_bound_shape(const Node* shape_of_node,
                              const HostTensorVector& output_values,
                              bool is_upper)
    {
        const auto& input_partial_shape = shape_of_node->get_input_partial_shape(0);
        if (input_partial_shape.rank().is_dynamic())
            return false;
        const auto rank = input_partial_shape.rank().get_length();
        auto pshape_low = PartialShape::dynamic(rank), pshape_up = PartialShape::dynamic(rank);
        for (Dimension::value_type i = 0; i < rank; ++i)
        {
            Interval interval = input_partial_shape[i].get_interval();
            pshape_low[i] = interval.get_min_val();
            pshape_up[i] = Dimension(interval.get_max_val()).is_dynamic()
                               ? Dimension(interval.get_max_val() - 1)
                               : interval.get_max_val();
        }
        NGRAPH_CHECK(pshape_up.is_static() && pshape_low.is_static());
        const auto input_et = shape_of_node->get_input_element_type(0);
        const auto output_et = shape_of_node->get_output_element_type(0);
        if (pshape_low.to_shape() == pshape_up.to_shape())
        {
            shape_of_node->evaluate(output_values,
                                    {std::make_shared<HostTensor>(input_et, pshape_low)});
            shape_of_node->get_output_tensor(0).set_lower_value(output_values[0]);
            shape_of_node->get_output_tensor(0).set_upper_value(output_values[0]);
        }
        else
        {
            HostTensorVector upper =
                is_upper ? output_values
                         : HostTensorVector{std::make_shared<HostTensor>(
                               output_et, PartialShape{pshape_up.rank().get_length()})};
            shape_of_node->evaluate(upper, {std::make_shared<HostTensor>(input_et, pshape_up)});
            shape_of_node->get_output_tensor(0).set_upper_value(upper[0]);

            HostTensorVector lower =
                !is_upper ? output_values
                          : HostTensorVector{std::make_shared<HostTensor>(
                                output_et, PartialShape{pshape_low.rank().get_length()})};
            shape_of_node->evaluate(lower, {std::make_shared<HostTensor>(input_et, pshape_low)});
            shape_of_node->get_output_tensor(0).set_lower_value(lower[0]);

            vector<bool> dynamic_mask; // true if dimension is dynamic
            for (const auto& i : input_partial_shape)
                dynamic_mask.push_back(Dimension(i.get_interval().get_max_val()).is_dynamic());
            auto mask_const =
                ngraph::op::Constant::create(element::boolean, {dynamic_mask.size()}, dynamic_mask);
            auto dynamic_min_const = ngraph::op::Constant::create(output_et, {}, {0});
            auto dynamic_max_const = ngraph::op::Constant::create(
                output_et,
                {},
                {output_et == element::i64 ? std::numeric_limits<int64_t>::max()
                                           : std::numeric_limits<int32_t>::max()});

            op::v1::Select().evaluate(lower,
                                      {std::make_shared<HostTensor>(mask_const),
                                       std::make_shared<HostTensor>(dynamic_min_const),
                                       lower[0]});
            op::v1::Select().evaluate(upper,
                                      {std::make_shared<HostTensor>(mask_const),
                                       std::make_shared<HostTensor>(dynamic_max_const),
                                       upper[0]});
        }
        return true;
    }
}

bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values,
@ -161,6 +229,16 @@ bool op::v3::ShapeOf::evaluate(const HostTensorVector& output_values,
    return shape_of::evaluate_shape_of(output_values[0], input_values[0]);
}

bool op::v3::ShapeOf::evaluate_lower(const HostTensorVector& output_values) const
{
    return shape_of::evaluate_bound_shape(this, output_values, false);
}

bool op::v3::ShapeOf::evaluate_upper(const HostTensorVector& output_values) const
{
    return shape_of::evaluate_bound_shape(this, output_values, true);
}

bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
    OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v3::ShapeOf::constant_fold");
@ -220,3 +298,13 @@ bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVec
        return false;
    return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
}

bool op::v0::ShapeOf::evaluate_lower(const HostTensorVector& output_values) const
{
    return shape_of::evaluate_bound_shape(this, output_values, false);
}

bool op::v0::ShapeOf::evaluate_upper(const HostTensorVector& output_values) const
{
    return shape_of::evaluate_bound_shape(this, output_values, true);
}

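evaluate_bound_shape materializes the input's interval shape as two temporary static shapes and reuses the regular ShapeOf evaluator on each, then patches fully dynamic dimensions with Select. A hypothetical example: for input PartialShape{2, Dimension(3, 5), Dimension::dynamic()} with i64 output, the lower-bound tensor ends up as {2, 3, 0} and the upper-bound tensor as {2, 5, int64_max}; the temporary static shape cannot carry an unbounded value, so the Select step is what overwrites the masked (unbounded) positions with 0 in the lower bound and the numeric maximum in the upper bound.
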
@ -16,6 +16,7 @@
#include <cmath>
#include <cstddef>
#include <memory>
#include <ngraph/validation_util.hpp>
#include <numeric>
#include "itt.hpp"

@ -74,9 +75,11 @@ void op::v1::SpaceToBatch::validate_and_infer_types()
    auto pads_begin = input_value(2);
    auto pads_end = input_value(3);

    if (ngraph::op::is_constant(block.get_node_shared_ptr()) &&
        ngraph::op::is_constant(pads_begin.get_node_shared_ptr()) &&
        ngraph::op::is_constant(pads_end.get_node_shared_ptr()) && data_pshape.is_static())
    const auto& block_const = get_constant_from_source(block);
    const auto& pads_begin_const = get_constant_from_source(pads_begin);
    const auto& pads_end_const = get_constant_from_source(pads_end);

    if (block_const && pads_begin_const && pads_end_const && data_pshape.is_static())
    {
        const auto& data_shape = data.get_shape();

@ -87,13 +90,9 @@ void op::v1::SpaceToBatch::validate_and_infer_types()
            data_shape.size(),
            ")");

        auto block_val = std::dynamic_pointer_cast<op::Constant>(block.get_node_shared_ptr())
                             ->cast_vector<int64_t>();
        auto pads_begin_val =
            std::dynamic_pointer_cast<op::Constant>(pads_begin.get_node_shared_ptr())
                ->cast_vector<int64_t>();
        auto pads_end_val = std::dynamic_pointer_cast<op::Constant>(pads_end.get_node_shared_ptr())
                                ->cast_vector<int64_t>();
        auto block_val = block_const->cast_vector<int64_t>();
        auto pads_begin_val = pads_begin_const->cast_vector<int64_t>();
        auto pads_end_val = pads_end_const->cast_vector<int64_t>();

        int64_t block_prod = 1;
        for (long idx : block_val)

@ -63,9 +63,9 @@ void op::v1::Split::validate_and_infer_types()
        this, axis_et.is_integral(), "The 'axis' input only accepts integral types");

    PartialShape each_output_shape{data_ps};
    if (op::is_constant(input_value(1).get_node()) && data_ps.rank().is_static())
    const auto axis_input = get_constant_from_source(input_value(1));
    if (axis_input && data_ps.rank().is_static())
    {
        const auto axis_input = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr());
        auto axis = axis_input->cast_vector<int64_t>()[0];

        const auto data_rank = get_input_partial_shape(0).rank();

@ -52,7 +52,7 @@ void op::Squeeze::pre_validate_and_infer_types()
    bool data_has_dynamic_rank = data.get_partial_shape().rank().is_dynamic();
    bool data_has_dynamic_shape = data.get_partial_shape().is_dynamic();

    auto axes_constant = as_type_ptr<op::v0::Constant>(axes_node);
    auto axes_constant = get_constant_from_source(axes_node);
    bool axes_is_empty_constant =
        (axes_constant) ? axes_constant->cast_vector<int64_t>().empty() : false;

@ -184,6 +184,20 @@ bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs,
    return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
}

bool op::v0::Squeeze::evaluate_lower(const HostTensorVector& output_values) const
{
    if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::v0::Squeeze::evaluate_upper(const HostTensorVector& output_values) const
{
    if (inputs().size() > 1 && !input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

bool op::v0::Squeeze::constant_fold(OutputVector& output_values, const OutputVector& inputs_values)
{
    if (get_output_partial_shape(0).is_dynamic())

@ -186,9 +186,9 @@ void op::v1::StridedSlice::validate_and_infer_types()
    set_input_is_relevant_to_shape(2);
    set_input_is_relevant_to_shape(3);

    auto begin_const = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr());
    auto end_const = as_type_ptr<op::Constant>(input_value(2).get_node_shared_ptr());
    auto strides = as_type_ptr<op::Constant>(input_value(3).get_node_shared_ptr());
    auto begin_const = get_constant_from_source(input_value(1));
    auto end_const = get_constant_from_source(input_value(2));
    auto strides = get_constant_from_source(input_value(3));

    if (begin_const && end_const && strides)
    {
@ -296,3 +296,21 @@ bool op::v1::StridedSlice::evaluate(const HostTensorVector& output_values,
        convert_mask_to_axis_set(get_ellipsis_mask()),
        output_values[0]);
}

bool op::v1::StridedSlice::evaluate_lower(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound() ||
        !input_value(2).get_tensor().has_and_set_bound() ||
        !input_value(3).get_tensor().has_and_set_bound())
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::v1::StridedSlice::evaluate_upper(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound() ||
        !input_value(2).get_tensor().has_and_set_bound() ||
        !input_value(3).get_tensor().has_and_set_bound())
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/tile.hpp"
#include <ngraph/validation_util.hpp>

#include "itt.hpp"
#include "ngraph/op/constant.hpp"
@ -51,41 +52,31 @@ void op::v0::Tile::validate_and_infer_types()

    auto arg_shape = get_input_partial_shape(0);
    auto repeats_shape = get_input_partial_shape(1);
    auto repeats_rank = repeats_shape.rank();

    NODE_VALIDATION_CHECK(this, repeats_rank.compatible(1), "Shape of repeats must be of rank 1");

    auto out_shape = PartialShape::dynamic();

    if (auto const_repeats = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr()))
    NODE_VALIDATION_CHECK(
        this, repeats_shape.rank().compatible(1), "Shape of repeats must be of rank 1");
    PartialShape repeats_as_pshape;
    bool repeats_are_known =
        evaluate_as_partial_shape(get_input_source_output(1), repeats_as_pshape);
    std::vector<Dimension> repeats_value(repeats_as_pshape);
    if (repeats_are_known && !repeats_value.empty() && arg_shape.rank().is_static())
    {
        if (arg_shape.is_static())
        {
            auto data_shape = arg_shape.to_shape();
            auto data_rank = data_shape.size();
            auto repeats_val = const_repeats->cast_vector<int64_t>();
            auto repeats_rank = repeats_val.size();
            auto output_rank = std::max(data_rank, repeats_rank);
        std::vector<Dimension> data_shape(arg_shape);
        auto data_rank = data_shape.size();
        auto repeats_rank = repeats_value.size();
        auto output_rank = std::max(data_rank, repeats_rank);

            // expand data shape and repeats to output rank
            data_shape.insert(data_shape.begin(), output_rank - data_rank, 1);
            repeats_val.insert(repeats_val.begin(), output_rank - repeats_rank, 1);
        // expand data shape and repeats to output rank
        data_shape.insert(data_shape.begin(), output_rank - data_rank, 1);
        repeats_value.insert(repeats_value.begin(), output_rank - repeats_rank, 1);

            Shape output_shape(output_rank);
            for (size_t i = 0; i < output_rank; i++)
            {
                output_shape[i] = data_shape[i] * repeats_val[i];
            }
            set_output_type(0, arg_et, output_shape);
        }
        else
        {
            set_output_type(0, arg_et, out_shape);
        }
        auto output_shape = PartialShape::dynamic(output_rank);
        for (size_t i = 0; i < output_rank; i++)
            output_shape[i] = data_shape[i] * repeats_value[i];
        set_output_type(0, arg_et, output_shape);
    }
    else
    {
        set_output_type(0, arg_et, out_shape);
        set_output_type(0, arg_et, PartialShape::dynamic());
    }

    set_input_is_relevant_to_shape(0);

@ -295,28 +295,32 @@ void op::v1::TopK::validate_and_infer_types()
    if (output_shape.rank().is_static())
    {
        m_normalized_axis = ngraph::normalize_axis(this, m_axis, output_shape.rank());
        if (k != 0)

        PartialShape k_as_shape;
        if (evaluate_as_partial_shape(input_value(1), k_as_shape))
        {
            output_shape[m_normalized_axis] = k;
        }
        else
        {
            auto max_k = maximum_value(input_value(1));
            if (max_k.first)
            if (k_as_shape.is_static())
            {
                const auto in_min = output_shape[m_normalized_axis].get_min_length();
                const auto in_max = output_shape[m_normalized_axis].get_max_length();
                const auto lower = std::min<Dimension::value_type>(in_min, max_k.second);
                const auto upper = in_max < 0
                                       ? Dimension::dynamic().get_max_length()
                                       : std::max<Dimension::value_type>(in_max, max_k.second);
                output_shape[m_normalized_axis] = Dimension(lower, upper);
                output_shape[m_normalized_axis] = k_as_shape[0];
            }
            else
            {
                output_shape[m_normalized_axis] = -1;
                const auto in_min = output_shape[m_normalized_axis].get_min_length();
                const auto in_max = output_shape[m_normalized_axis].get_max_length();

                const auto k_min = k_as_shape[0].get_min_length();
                const auto k_max = k_as_shape[0].get_max_length();

                const auto lower = std::min<Dimension::value_type>(in_min, k_min);
                const auto upper = in_max < 0 ? Dimension::dynamic().get_max_length()
                                              : std::max<Dimension::value_type>(in_max, k_max);
                output_shape[m_normalized_axis] = Dimension(lower, upper);
            }
        }
        else
        {
            output_shape[m_normalized_axis] = -1;
        }
    }

    set_output_size(2);

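This is the TopK k propagation from the commit message: rather than only consulting maximum_value, the k input is evaluated as a partial shape, so interval bounds on k survive into the output dimension. A worked example with hypothetical numbers, following the code above: for a static input dimension of 20 and k proven to lie in [1, 10], in_min = in_max = 20, k_min = 1 and k_max = 10, so lower = min(20, 1) = 1, upper = max(20, 10) = 20, and the output dimension becomes Dimension(1, 20).
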
@ -14,10 +14,9 @@
// limitations under the License.
//*****************************************************************************

#include <iostream>
#include <ngraph/validation_util.hpp>

#include "itt.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/transpose.hpp"
#include "ngraph/runtime/opt_kernel/reshape.hpp"

@ -59,7 +58,7 @@ void op::v1::Transpose::validate_and_infer_types()

    set_input_is_relevant_to_shape(1);

    if (auto input_const = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr()))
    if (const auto& input_const = get_constant_from_source(input_value(1)))
    {
        auto permutation = input_const->get_axis_vector_val();
        if (permutation.empty())

@ -44,9 +44,8 @@ void op::v0::Unsqueeze::validate_and_infer_types()
    auto data_partial_shape = data.get_partial_shape();
    const auto data_rank = data_partial_shape.rank();

    const auto axes_node = input_value(1).get_node_shared_ptr();

    if (data_rank.is_dynamic() || !op::is_constant(axes_node))
    const auto axes_constant = get_constant_from_source(input_value(1));
    if (data_rank.is_dynamic() || !axes_constant)
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
        return;
@ -55,7 +54,6 @@ void op::v0::Unsqueeze::validate_and_infer_types()
    uint64_t data_rank_value = data_partial_shape.rank().get_length();

    // Get value of axes from Constant
    const auto axes_constant = as_type_ptr<op::v0::Constant>(axes_node);
    const auto axes_values = axes_constant->cast_vector<int64_t>();
    const auto expanded_rank = data_rank_value + axes_values.size();
    auto axes = normalize_axes(this->description(), axes_values, expanded_rank);
@ -157,6 +155,20 @@ bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs,
    return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
}

bool op::v0::Unsqueeze::evaluate_lower(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::v0::Unsqueeze::evaluate_upper(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound())
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

bool op::v0::Unsqueeze::constant_fold(OutputVector& output_values,
                                      const OutputVector& inputs_values)
{

@ -50,7 +50,7 @@ bool op::util::ArithmeticReduction::reduction_axes_constant() const
const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const
{
    AxisSet axes;
    if (auto const_op = as_type<op::Constant>(input_value(1).get_node()))
    if (const auto& const_op = get_constant_from_source(input_value(1)))
    {
        const auto const_data = const_op->cast_vector<int64_t>();
        const auto input_data_rank = get_input_partial_shape(0).rank();
@ -76,11 +76,11 @@ void op::util::ArithmeticReduction::validate_and_infer_types()

    PartialShape result_shape{PartialShape::dynamic()};

    if (input_rank.is_static() && reduction_axes_constant())
    auto axes = get_constant_from_source(input_value(1));
    if (input_rank.is_static() && axes)
    {
        AxisSet reduction_axes;
        const auto reduction_axes_val =
            as_type<op::Constant>(input_value(1).get_node())->cast_vector<int64_t>();
        const auto reduction_axes_val = axes->cast_vector<int64_t>();
        for (auto axis : reduction_axes_val)
        {
            try

@ -51,11 +51,11 @@ void op::util::ArithmeticReductionKeepDims::validate_and_infer_types()
    if (input_rank.is_static())
        result_shape = PartialShape::dynamic(input_rank);

    if (input_rank.is_static() && reduction_axes_constant())
    const auto& axes = get_constant_from_source(input_value(1));
    if (input_rank.is_static() && axes)
    {
        AxisSet reduction_axes;
        auto reduction_axes_val =
            as_type<op::Constant>(input_value(1).get_node())->cast_vector<int64_t>();
        auto reduction_axes_val = axes->cast_vector<int64_t>();
        for (auto axis : reduction_axes_val)
        {
            try

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include <ngraph/validation_util.hpp>
#include "itt.hpp"
#include "ngraph/attribute_visitor.hpp"
#include "ngraph/op/util/elementwise_args.hpp"
@ -65,3 +66,27 @@ bool op::util::BinaryElementwiseArithmetic::visit_attributes(AttributeVisitor& v
    visitor.on_attribute("auto_broadcast", m_autob);
    return true;
}

bool op::util::BinaryElementwiseArithmetic::evaluate_upper(
    const HostTensorVector& output_values) const
{
    HostTensorVector lower_output_tensors;
    for (const auto& output : output_values)
        lower_output_tensors.push_back(
            std::make_shared<HostTensor>(output->get_element_type(), output->get_partial_shape()));
    if (!interval_bound_evaluator(this, lower_output_tensors, output_values))
        return false;
    return true;
}

bool op::util::BinaryElementwiseArithmetic::evaluate_lower(
    const HostTensorVector& output_values) const
{
    HostTensorVector upper_output_tensors;
    for (const auto& output : output_values)
        upper_output_tensors.push_back(
            std::make_shared<HostTensor>(output->get_element_type(), output->get_partial_shape()));
    if (!interval_bound_evaluator(this, output_values, upper_output_tensors))
        return false;
    return true;
}

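Both overrides delegate to interval_bound_evaluator, which by the signatures used here appears to compute lower and upper output bounds in one call (second argument lower, third argument upper); each caller allocates scratch tensors for the bound it does not need and passes its own output_values in the matching slot. The naming is easy to misread: inside evaluate_upper, lower_output_tensors is the throwaway lower-bound buffer, while output_values receives the upper bound.
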
@ -24,6 +24,7 @@

#include "ngraph/runtime/reference/broadcast.hpp"

#include <ngraph/validation_util.hpp>
#include <numeric>

using namespace std;
@ -48,9 +49,12 @@ op::util::BroadcastBase::BroadcastBase(const Output<Node>& arg,

PartialShape op::util::BroadcastBase::get_result_shape_pdpd(
    const PartialShape& arg0_shape,
    const Shape& target_shape,
    const PartialShape& target_pshape,
    const op::BroadcastModeSpec& broadcast_spec) const
{
    if (target_pshape.is_dynamic())
        return PartialShape::dynamic(target_pshape.rank());
    Shape target_shape = target_pshape.to_shape();
    if (arg0_shape.rank().is_dynamic())
    {
        return PartialShape::dynamic(target_shape.size());
@ -85,44 +89,47 @@ PartialShape op::util::BroadcastBase::get_result_shape_pdpd(
}

void op::util::BroadcastBase::validate_target_shape_numpy(const PartialShape& arg_shape,
                                                          const Shape& target_shape) const
                                                          const PartialShape& target_shape) const
{
    if (arg_shape.rank().is_dynamic())
    if (arg_shape.rank().is_dynamic() || target_shape.rank().is_dynamic())
    {
        return;
    }
    const auto arg_rank_length = arg_shape.rank().get_length();
    const int64_t start_axis = target_shape.size() - arg_rank_length;
    const auto target_rank_length = target_shape.rank().get_length();
    const int64_t start_axis = target_rank_length - arg_rank_length;
    NODE_VALIDATION_CHECK(this,
                          start_axis >= 0,
                          "Broadcast target_shape has smaller rank ",
                          target_shape.size(),
                          target_rank_length,
                          " than arg shape ",
                          arg_rank_length);
    for (auto i = start_axis; i < target_shape.size(); i++)
    for (auto i = start_axis; i < target_rank_length; i++)
    {
        if (arg_shape[i - start_axis].is_dynamic())
        {
            continue;
        }
        const size_t arg_dim = arg_shape[i - start_axis].get_length();
        stringstream ss;
        ss << " or " << target_shape[i];
        NODE_VALIDATION_CHECK(this,
                              arg_dim == 1 || arg_dim == target_shape[i],
                              arg_shape[i - start_axis].is_dynamic() ||
                                  target_shape[i].is_dynamic() || arg_shape[i - start_axis] == 1 ||
                                  arg_shape[i - start_axis] == target_shape[i],
                              "Input shape dimension equal ",
                              arg_dim,
                              arg_shape[i - start_axis],
                              " cannot be broadcasted (numpy mode) to ",
                              target_shape[i],
                              ". Allowed input dimension value would be 1",
                              target_shape[i] != 1
                                  ? (std::string(" or ") + std::to_string(target_shape[i])).c_str()
                                  : "");
                              target_shape[i] != 1 ? ss.str() : "");
    }
}

void op::util::BroadcastBase::validate_target_shape_none(const Shape& arg_shape,
void op::util::BroadcastBase::validate_target_shape_none(const PartialShape& arg_shape,
                                                         const AxisVector& axes_mapping_val,
                                                         const Shape& target_shape) const
                                                         const PartialShape& target_shape) const
{
    if (arg_shape.rank().is_dynamic() || target_shape.rank().is_dynamic())
    {
        return;
    }
    const auto target_rank_length = target_shape.rank().get_length();
    // axes_mapping needs to be in sorted order
    NODE_VALIDATION_CHECK(this,
                          std::is_sorted(axes_mapping_val.begin(), axes_mapping_val.end()),
@ -130,7 +137,7 @@ void op::util::BroadcastBase::validate_target_shape_none(const Shape& arg_shape,
                          axes_mapping_val,
                          " not in sorted order");

    if (arg_shape.size() == 0 && axes_mapping_val.size() > 0)
    if (arg_shape.rank().get_length() == 0 && axes_mapping_val.size() > 0)
    {
        NODE_VALIDATION_CHECK(this,
                              target_shape[axes_mapping_val[0]] == 1,
@ -141,18 +148,18 @@ void op::util::BroadcastBase::validate_target_shape_none(const Shape& arg_shape,
    for (size_t i = 0; i < axes_mapping_val.size(); i++)
    {
        NODE_VALIDATION_CHECK(this,
                              axes_mapping_val[i] < target_shape.size(),
                              axes_mapping_val[i] < target_rank_length,
                              "Broadcast axes_mapping[",
                              i,
                              "]: ",
                              axes_mapping_val[i],
                              " exceeds target rank ",
                              target_shape.size());
                              target_rank_length);

        if (arg_shape.size() > 0)
        if (arg_shape.rank().get_length() > 0)
        {
            NODE_VALIDATION_CHECK(this,
                                  target_shape[axes_mapping_val[i]] == arg_shape[i],
                                  target_shape[axes_mapping_val[i]].same_scheme(arg_shape[i]),
                                  "Broadcast target[axes_mapping[",
                                  i,
                                  "]]",
@ -219,13 +226,15 @@ void op::util::BroadcastBase::validate_and_infer_types()
        }
    }

    const auto shape_constant = as_type_ptr<op::v0::Constant>(input_value(1).get_node_shared_ptr());
    PartialShape output_shape;
    bool output_shape_defined = evaluate_as_partial_shape(get_input_source_output(1), output_shape);

    if (auto concat = as_type_ptr<op::v0::Concat>(input_value(1).get_node_shared_ptr()))
    {
        auto concat_inputs = concat->inputs();

        if (concat->get_output_partial_shape(0).is_static() && concat->get_shape().size() == 1 &&
        if (!output_shape_defined && concat->get_output_partial_shape(0).is_static() &&
            concat->get_shape().size() == 1 &&
            concat_inputs.size() == shape_size(concat->get_shape()))
        {
            auto output_partial_shape = vector<Dimension>{};
@ -241,15 +250,16 @@ void op::util::BroadcastBase::validate_and_infer_types()
                    output_partial_shape.push_back(Dimension::dynamic());
                }
            }
            result_shape = PartialShape(output_partial_shape);
            output_shape_defined = true;
            output_shape = PartialShape(output_partial_shape);
        }
    }

    if (m_mode.m_type == BroadcastType::NONE)
    {
        if (shape_constant)
        if (output_shape_defined)
        {
            result_shape = shape_constant->get_shape_val();
            result_shape = output_shape;
        }
        // Validate axes_mapping
        if (get_input_partial_shape(0).is_static() && get_input_partial_shape(1).is_static() &&
@ -268,31 +278,27 @@ void op::util::BroadcastBase::validate_and_infer_types()
                " doesn't match rank of input tensor ",
                input_rank);

            if (shape_constant && op::is_constant(input_value(2).get_node()))
            if (output_shape_defined && has_and_set_equal_bounds(input_value(2)))
            {
                auto target_shape = shape_constant->get_shape_val();
                auto axes_mapping_val =
                    as_type_ptr<op::v0::Constant>(input_value(2).get_node_shared_ptr())
                        ->get_axis_vector_val();
                validate_target_shape_none(arg_shape, axes_mapping_val, target_shape);
                    get_constant_from_source(input_value(2))->get_axis_vector_val();
                validate_target_shape_none(arg_shape, axes_mapping_val, output_shape);
            }
        }
    }
    else if (m_mode.m_type == BroadcastType::NUMPY)
    {
        if (shape_constant)
        if (output_shape_defined)
        {
            const auto target_shape = shape_constant->get_shape_val();
            result_shape = target_shape;
            validate_target_shape_numpy(input_shape, target_shape);
            result_shape = output_shape;
            validate_target_shape_numpy(input_shape, output_shape);
        }
    }
    else if (m_mode.m_type == BroadcastType::PDPD)
    {
        if (shape_constant)
        if (output_shape_defined)
        {
            const auto target_shape = shape_constant->get_shape_val();
            result_shape = get_result_shape_pdpd(input_shape, target_shape, m_mode);
            result_shape = get_result_shape_pdpd(input_shape, output_shape, m_mode);
        }
    }
    set_output_type(0, get_input_element_type(0), result_shape);
@ -344,8 +350,7 @@ std::pair<bool, AxisSet> op::util::BroadcastBase::get_broadcast_axes() const

    if (m_mode.m_type == BroadcastType::NONE)
    {
        const auto axes_mapping_constant =
            as_type_ptr<op::v0::Constant>(input_value(2).get_node_shared_ptr());
        const auto axes_mapping_constant = get_constant_from_source(input_value(2));
        if (get_input_partial_shape(1).is_static() && axes_mapping_constant)
        {
            auto axes_mapping_val = axes_mapping_constant->get_axis_vector_val();
@ -563,3 +568,19 @@ bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs,

    return evaluate_broadcast(inputs[0], outputs[0], pair_broadcast_axes, result_shape.to_shape());
}

bool op::util::BroadcastBase::evaluate_lower(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound() ||
        (get_input_size() > 2 && !input_value(2).get_tensor().has_and_set_bound()))
        return false;
    return default_lower_bound_evaluator(this, output_values);
}

bool op::util::BroadcastBase::evaluate_upper(const HostTensorVector& output_values) const
{
    if (!input_value(1).get_tensor().has_and_set_bound() ||
        (get_input_size() > 2 && !input_value(2).get_tensor().has_and_set_bound()))
        return false;
    return default_upper_bound_evaluator(this, output_values);
}

@ -48,7 +48,6 @@ void op::util::FusedOp::validate_and_infer_types()
    for (auto& val : input_values())
        nodes.emplace_back(val.get_node_shared_ptr());
    auto subgraph = extract_subgraph(ngraph::as_node_vector(subgraph_outputs), nodes);
    validate_nodes_and_infer_types(subgraph);

    size_t i = 0;
    for (const auto& output : subgraph_outputs)

@ -43,13 +43,13 @@ op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg,

bool op::util::LogicalReduction::reduction_axes_constant() const
{
    return is_type<op::Constant>(input_value(1).get_node());
    return has_and_set_equal_bounds(input_value(1));
}

const AxisSet op::util::LogicalReduction::get_reduction_axes() const
{
    AxisSet axes;
    if (auto const_op = as_type<op::Constant>(input_value(1).get_node()))
    if (auto const_op = get_constant_from_source(input_value(1)))
    {
        axes = const_op->get_axis_set_val();
    }
@ -71,11 +71,21 @@ void op::util::LogicalReduction::validate_and_infer_types()

    PartialShape result_shape{PartialShape::dynamic()};

    if (input_rank.is_static() && reduction_axes_constant())
    set_input_is_relevant_to_shape(1);

    NODE_VALIDATION_CHECK(this,
                          get_input_element_type(0).compatible(element::boolean),
                          "Input element type must be boolean.");

    set_output_type(0, element::boolean, result_shape);

    if (input_rank.is_dynamic())
        return;

    if (const auto axes_const = get_constant_from_source(input_value(1)))
    {
        AxisSet reduction_axes;
        auto reduction_axes_val =
            as_type<op::Constant>(input_value(1).get_node())->cast_vector<int64_t>();
        auto reduction_axes_val = axes_const->cast_vector<int64_t>();
        for (auto axis : reduction_axes_val)
        {
            try
@ -110,11 +120,5 @@ void op::util::LogicalReduction::validate_and_infer_types()
        result_shape = PartialShape(dims);
    }

    set_input_is_relevant_to_shape(1);

    NODE_VALIDATION_CHECK(this,
                          get_input_element_type(0).compatible(element::boolean),
                          "Input element type must be boolean.");

    set_output_type(0, element::boolean, result_shape);
}

@ -46,18 +46,18 @@ void op::util::LogicalReductionKeepDims::validate_and_infer_types()
{
    const auto input_shape = get_input_partial_shape(0);
    const auto input_rank = input_shape.rank();
    PartialShape result_shape{PartialShape::dynamic()};
    PartialShape result_shape{PartialShape::dynamic(input_rank)};

    if (input_rank.is_static())
    {
        result_shape = PartialShape::dynamic(input_rank);
    }
    set_input_is_relevant_to_shape(1);
    set_output_type(0, get_input_element_type(0), result_shape);

    if (input_rank.is_static() && reduction_axes_constant())
    if (input_shape.is_dynamic())
        return;

    if (auto axes_const = get_constant_from_source(input_value(1)))
    {
        AxisSet reduction_axes;
        auto reduction_axes_val =
            as_type<op::Constant>(input_value(1).get_node())->cast_vector<int64_t>();
        auto reduction_axes_val = axes_const->cast_vector<int64_t>();
        for (auto axis : reduction_axes_val)
        {
            try
@ -94,7 +94,7 @@ void op::util::LogicalReductionKeepDims::validate_and_infer_types()
        }
        result_shape = PartialShape(dims);
    }
    set_input_is_relevant_to_shape(1);

    set_output_type(0, get_input_element_type(0), result_shape);
}
else

@ -81,14 +81,19 @@ void op::util::ScatterBase::validate_and_infer_types()
        data_shape.rank().get_length() - 1,
        "Updates rank is expected to be indices rank + data rank - 1.");

    bool is_axis_constant = op::is_constant(input_value(AXIS).get_node());
    if (data_shape.is_dynamic())
    {
        set_input_is_relevant_to_shape(0);
    }
    set_output_type(0, data_et, data_shape);

    if (data_shape.rank().is_dynamic())
        return;

    // Get axis value if possible.
    if (is_axis_constant && data_shape.rank().is_static())
    if (const auto& axis_const_input = get_constant_from_source(input_value(AXIS)))
    {
        bool compatible = true;
        const auto axis_const_input =
            as_type_ptr<op::v0::Constant>(input_value(AXIS).get_node_shared_ptr());
        int64_t axis = axis_const_input->cast_vector<int64_t>().at(0);
        axis = normalize_axis(this, axis, data_shape.rank().get_length());

@ -125,12 +130,6 @@ void op::util::ScatterBase::validate_and_infer_types()
            axis,
            ".");
    }

    if (data_shape.is_dynamic())
    {
        set_input_is_relevant_to_shape(0);
    }
    set_output_type(0, data_et, data_shape);
}

bool op::util::ScatterBase::visit_attributes(AttributeVisitor& visitor)

@ -142,6 +142,7 @@ void op::util::SubGraphOp::set_merged_input(const std::shared_ptr<Parameter>& bo
        input_for_value(initial_value).get_index(),
        m_body->get_parameter_index(body_parameter),
        m_body->get_result_index(successive_value)));
    validate_and_infer_types();
}

void op::util::SubGraphOp::set_invariant_input(const std::shared_ptr<Parameter>& body_parameter,
@ -149,6 +150,7 @@ void op::util::SubGraphOp::set_invariant_input(const std::shared_ptr<Parameter>&
{
    m_input_descriptions.push_back(std::make_shared<TensorIterator::InvariantInputDescription>(
        input_for_value(value).get_index(), m_body->get_parameter_index(body_parameter)));
    validate_and_infer_types();
}

Output<Node> op::util::SubGraphOp::get_iter_value(const Output<Node>& body_value, int64_t iteration)
@ -157,6 +159,7 @@ Output<Node> op::util::SubGraphOp::get_iter_value(const Output<Node>& body_value
    m_output_descriptions.push_back(std::make_shared<BodyOutputDescription>(
        m_body->get_result_index(body_value), output_index, iteration));
    set_output_size(output_index + 1);
    validate_and_infer_types();
    return Output<Node>(shared_from_this(), output_index);
}

@ -171,6 +174,7 @@ Output<Node> op::util::SubGraphOp::get_concatenated_slices(const Output<Node>& b
    m_output_descriptions.push_back(std::make_shared<ConcatOutputDescription>(
        m_body->get_result_index(body_value), output_index, start, stride, part_size, end, axis));
    set_output_size(output_index + 1);
    validate_and_infer_types();
    return Output<Node>(shared_from_this(), output_index);
}

@ -190,6 +194,7 @@ void op::util::SubGraphOp::set_sliced_input(const std::shared_ptr<Parameter>& pa
        part_size,
        end,
        axis));
    validate_and_infer_types();
}

Input<Node> op::util::SubGraphOp::input_for_value(const Output<Node>& value)

@ -63,23 +63,21 @@ void ngraph::op::v1::VariadicSplit::validate_and_infer_types()

    auto num_outputs = split_lengths_pshape[0].get_length();
    auto data = input_value(0);
    auto axis_input = input_value(1).get_node_shared_ptr();
    auto split_lengths_input = input_value(2).get_node_shared_ptr();
    auto axis_source = input_value(1);
    auto split_lengths_source = input_value(2);
    auto data_shape = data.get_partial_shape();
    const auto& data_type = data.get_element_type();

    set_output_size(num_outputs);
    if (data_shape.rank().is_static() && op::is_constant(axis_input) &&
        op::is_constant(split_lengths_input))
    const auto& axis_input_constant = get_constant_from_source(axis_source);
    const auto& split_lengths_constant = get_constant_from_source(split_lengths_source);
    if (data_shape.rank().is_static() && axis_input_constant && split_lengths_constant)
    {
        const auto axis_input_constant = as_type_ptr<op::Constant>(axis_input);
        auto axis_val = axis_input_constant->cast_vector<int64_t>()[0];

        // Adjust split axis in case of negatives
        int64_t axis = ngraph::normalize_axis(this, axis_val, data_shape.rank());

        auto split_lengths =
            as_type_ptr<op::Constant>(split_lengths_input)->cast_vector<int64_t>();
        auto split_lengths = split_lengths_constant->cast_vector<int64_t>();
        // Adjust split lengths in case of negatives
        size_t sum_of_splits = 0;
        int64_t negative_one = -1;

@ -15,6 +15,7 @@
//*****************************************************************************

#include "ngraph/pass/constant_folding.hpp"
#include <ngraph/op/constant.hpp>
#include "ngraph/op/util/sub_graph_base.hpp"
#include "ngraph/rt_info.hpp"

@ -25,13 +26,13 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConstantFolding, "ConstantFolding", 0);

bool ngraph::pass::ConstantFolding::run_on_function(std::shared_ptr<ngraph::Function> f)
{
    bool rewritten = false;
    bool rewritten = pre_calculated_values_folding(f);

    for (const auto& node : f->get_ordered_ops())
    {
        if (rewritten)
        {
            node->revalidate_and_infer_types();
            node->validate_and_infer_types();
        }

        OutputVector replacements(node->get_output_size());
@ -90,3 +91,58 @@ void ngraph::pass::ConstantFolding::copy_runtime_info_to_target_inputs(
        copy_runtime_info({node, consumer}, consumer);
    }
}

bool ngraph::pass::ConstantFolding::pre_calculated_values_folding(
    const std::shared_ptr<ngraph::Function>& f)
{
    deque<shared_ptr<Node>> nodes;
    set<shared_ptr<Node>> visited;
    for (auto& r : f->get_results())
        nodes.push_back(r);
    for (auto& r : f->get_sinks())
        nodes.emplace_back(r);

    bool rewritten = false;
    while (!nodes.empty())
    {
        auto curr_node = nodes.front();
        nodes.pop_front();
        if (visited.count(curr_node) || is_type<op::Constant>(curr_node))
            continue;
        visited.insert(curr_node);

        for (auto& input_value : curr_node->input_values())
        {
            if (input_value.get_tensor().has_and_set_bound())
            {
                auto input_node = input_value.get_node_shared_ptr();
                auto replacement =
                    std::make_shared<op::Constant>(input_value.get_tensor().get_lower_value());
                if (replacement && !is_type<op::Constant>(input_node))
                {
                    if (input_node->get_output_size() == 1)
                    {
                        replacement->set_friendly_name(input_node->get_friendly_name());
                    }
                    else
                    {
                        replacement->set_friendly_name(input_node->get_friendly_name() + "." +
                                                       std::to_string(input_value.get_index()));
                    }
                    input_value.replace(replacement);
                    // Propagate runtime info attributes to replacement consumer nodes
                    copy_runtime_info_to_target_inputs(input_node, replacement);

                    rewritten = true;
                }
            }
            else
            {
                // continue searching
                const auto& input_node = input_value.get_node_shared_ptr();
                nodes.push_front(input_node);
            }
        }
    }
    return rewritten;
}

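This is the 'Precalc Folding' piece of the commit: before the per-node folding loop, pre_calculated_values_folding walks backward from results and sinks and splices a Constant in wherever a tensor's value is already fully determined by its bounds. A sketch of the invariant it relies on, assuming has_and_set_bound() is true exactly when both bound tensors are set and equal (output here is a hypothetical Output<Node>):

    // If lower == upper, the runtime value is known without executing the
    // producing subgraph, so it can be folded to a Constant up front.
    const auto& tensor = output.get_tensor();
    if (tensor.has_and_set_bound())
    {
        auto folded = std::make_shared<ngraph::op::Constant>(tensor.get_lower_value());
        // folded now carries the precalculated value of output
    }

Because the traversal stops at such tensors, subgraphs that only feed precalculated values are never visited, let alone folded node by node.
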
@ -314,7 +314,7 @@ static std::string pretty_partial_shape(const PartialShape& shape)
        }
        if (shape[i].is_dynamic())
        {
            ss << "?";
            ss << shape[i];
        }
        else
        {

@ -15,6 +15,10 @@
//*****************************************************************************

#include <algorithm>
#include <ngraph/ops.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/rt_info.hpp>
#include <numeric>

#include "ngraph/evaluator.hpp"
#include "ngraph/op/concat.hpp"
@ -1194,3 +1198,380 @@ void ngraph::evaluate_nodes(std::map<RawNodeOutput, HostTensorPtr>& value_map,
        evaluator.evaluate(value);
    }
}

bool could_propagate(const Output<Node>& output, std::vector<Node*>& order)
{
    bool status = true;

    std::deque<Node*> nodes_to_calculate = {output.get_node()};
    order.push_back(output.get_node());

    while (status && !nodes_to_calculate.empty())
    {
        auto current_node = nodes_to_calculate.front();
        nodes_to_calculate.pop_front();

        if (current_node->inputs().empty() && !is_type<op::Constant>(current_node))
            status = false;
        else if (!is_type<op::v0::ShapeOf>(current_node) && !is_type<op::v3::ShapeOf>(current_node))
        {
            // not a leaf, not a shape_of -- continue to search
            for (const auto& input_value : current_node->input_values())
            {
                const auto& input_node = input_value.get_node();
                order.push_back(input_node);
                nodes_to_calculate.push_front(input_node);
            }
        }
    }
    return status;
}

HostTensorPtr evaluate_bound(const Output<Node>& output, bool is_upper)
{
    // bound is already set in the tensor
    if (is_upper && output.get_tensor().get_upper_value() != nullptr)
        return output.get_tensor().get_upper_value();
    if (!is_upper && output.get_tensor().get_lower_value() != nullptr)
        return output.get_tensor().get_lower_value();

    std::vector<Node*> order;
    if (could_propagate(output, order))
    {
        reverse(order.begin(), order.end());
        for (const auto& node : order)
        {
            HostTensorVector outputs;
            for (const auto& out : node->outputs())
                outputs.push_back(std::make_shared<HostTensor>(out));
            if (is_upper ? node->evaluate_upper(outputs) : node->evaluate_lower(outputs))
            {
                const auto& input_values = node->input_values();
                bool same_inputs = std::all_of(
                    input_values.begin(), input_values.end(), [](const Output<Node>& input) {
                        return input.get_tensor().has_and_set_bound();
                    });
                for (size_t i = 0; i < outputs.size(); ++i)
                {
                    // TODO: should we skip setting value for tensors that have only one consumer?
                    if ((same_inputs || is_upper) &&
                        node->get_output_tensor(i).get_upper_value() == nullptr)
                        node->get_output_tensor(i).set_upper_value(outputs[i]);
                    if ((same_inputs || !is_upper) &&
                        node->get_output_tensor(i).get_lower_value() == nullptr)
                        node->get_output_tensor(i).set_lower_value(outputs[i]);
                }
                for (const auto& input : input_values)
                    if (input.get_target_inputs().size() == 1)
                        input.get_tensor().invalidate_values();
            }
            else
            {
                break;
            }
        }
    }
    if (is_upper)
        return output.get_tensor().get_upper_value();
    else
        return output.get_tensor().get_lower_value();
}

||||
HostTensorPtr ngraph::evaluate_lower_bound(const Output<Node>& output)
{
    return evaluate_bound(output, false);
}

HostTensorPtr ngraph::evaluate_upper_bound(const Output<Node>& output)
{
    return evaluate_bound(output, true);
}

pair<HostTensorPtr, HostTensorPtr> ngraph::evaluate_both_bounds(const Output<Node>& output)
{
    return {evaluate_lower_bound(output), evaluate_upper_bound(output)};
}
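A hedged usage sketch of the queries just defined, with interval dimensions matching the type_prop tests added later in this commit (the expected values assume ShapeOf populates its bounds, which is what those tests rely on):

static void sketch_query_bounds()
{
    auto data = std::make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 4), 3});
    auto shape_of = std::make_shared<op::v3::ShapeOf>(data);

    HostTensorPtr lb, ub;
    std::tie(lb, ub) = evaluate_both_bounds(shape_of->output(0));
    // expected: lb holds {2, 3} and ub holds {4, 3}; both tensors are cached
    // on the output, so repeated queries return the same HostTensors
}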
bool ngraph::evaluate_as_partial_shape(const Output<Node>& output, PartialShape& pshape)
{
    HostTensorPtr lb, ub;
    std::tie(lb, ub) = evaluate_both_bounds(output);
    bool shape_defined = false;
    if (lb && ub)
    {
        const auto lower_bound = std::make_shared<op::Constant>(lb)->cast_vector<int64_t>();
        const auto upper_bound = std::make_shared<op::Constant>(ub)->cast_vector<int64_t>();
        NGRAPH_CHECK(lower_bound.size() == upper_bound.size());
        vector<Dimension> resulting_pshape(lower_bound.size());
        for (size_t i = 0; i < lower_bound.size(); ++i)
        {
            NGRAPH_CHECK(lower_bound[i] >= 0 && upper_bound[i] >= 0);
            resulting_pshape[i] = {lower_bound[i], upper_bound[i]};
        }
        pshape = PartialShape(resulting_pshape);
        shape_defined = true;
    }
    return shape_defined;
}
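evaluate_as_partial_shape is the bridge back into shape inference: element-wise lower and upper bounds become interval Dimensions. A minimal sketch (illustrative graph, same assumption about ShapeOf bounds as above):

static void sketch_shape_from_bounds()
{
    auto data = std::make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2});
    auto shape_of = std::make_shared<op::v3::ShapeOf>(data);

    PartialShape inferred;
    if (evaluate_as_partial_shape(shape_of->output(0), inferred))
    {
        // expected: bound vectors {1, 2} and {8, 2} merge element-wise
        // into PartialShape{Dimension(1, 8), 2}
    }
}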
bool default_bound_evaluator(const Node* node, const HostTensorVector& output_values, bool is_upper)
{
    HostTensorVector input_tensors;
    for (const auto& input : node->input_values())
    {
        if (auto bound = is_upper ? input.get_tensor().get_upper_value()
                                  : input.get_tensor().get_lower_value())
            input_tensors.push_back(bound);
        else
            return false;
    }
    return node->evaluate(output_values, input_tensors);
}

bool ngraph::default_lower_bound_evaluator(const Node* node, const HostTensorVector& output_values)
{
    return default_bound_evaluator(node, output_values, false);
}

bool ngraph::default_upper_bound_evaluator(const Node* node, const HostTensorVector& output_values)
{
    return default_bound_evaluator(node, output_values, true);
}
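The default evaluators simply feed each input's cached bound tensor through the node's regular evaluate(), which is sound for ops monotone in every input. A sketch that exercises the helper directly (whether any particular op wires its bound evaluation through it is not shown in this hunk):

static void sketch_default_bounds()
{
    auto a = std::make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2});
    auto b = std::make_shared<op::Parameter>(element::f32, PartialShape{Dimension(16, 64), 3});
    auto concat = std::make_shared<op::Concat>(
        OutputVector{std::make_shared<op::v3::ShapeOf>(a), std::make_shared<op::v3::ShapeOf>(b)},
        0);

    // populate the input bounds first, then forward them through evaluate()
    for (const auto& input : concat->input_values())
        evaluate_lower_bound(input);

    HostTensorVector out{std::make_shared<HostTensor>(element::i64, Shape{4})};
    // expected on success: out[0] holds the concatenated lower bounds {1, 2, 16, 3}
    bool ok = default_lower_bound_evaluator(concat.get(), out);
}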
shared_ptr<op::Constant> ngraph::get_constant_max_of_type(element::Type_t t)
{
#define NGRAPH_TYPE_TO_MAX_CONST(t)                                                                \
    case t:                                                                                        \
        return op::Constant::create(                                                               \
            t, {}, {std::numeric_limits<typename element_type_traits<t>::value_type>::max()});     \
        break

    switch (t)
    {
        NGRAPH_TYPE_TO_MAX_CONST(element::boolean);
        NGRAPH_TYPE_TO_MAX_CONST(element::bf16);
        NGRAPH_TYPE_TO_MAX_CONST(element::f16);
        NGRAPH_TYPE_TO_MAX_CONST(element::f32);
        NGRAPH_TYPE_TO_MAX_CONST(element::f64);
        NGRAPH_TYPE_TO_MAX_CONST(element::i8);
        NGRAPH_TYPE_TO_MAX_CONST(element::i16);
        NGRAPH_TYPE_TO_MAX_CONST(element::i32);
        NGRAPH_TYPE_TO_MAX_CONST(element::i64);
        NGRAPH_TYPE_TO_MAX_CONST(element::u1);
        NGRAPH_TYPE_TO_MAX_CONST(element::u8);
        NGRAPH_TYPE_TO_MAX_CONST(element::u16);
        NGRAPH_TYPE_TO_MAX_CONST(element::u32);
        NGRAPH_TYPE_TO_MAX_CONST(element::u64);

    case element::undefined:
    case element::dynamic:
    default: return nullptr;
    }
}

shared_ptr<op::Constant> ngraph::get_constant_min_of_type(element::Type_t t)
{
#define NGRAPH_TYPE_TO_MIN_CONST(t)                                                                \
    case t:                                                                                        \
        return op::Constant::create(                                                               \
            t, {}, {std::numeric_limits<typename element_type_traits<t>::value_type>::min()});     \
        break

    switch (t)
    {
        NGRAPH_TYPE_TO_MIN_CONST(element::boolean);
        NGRAPH_TYPE_TO_MIN_CONST(element::bf16);
        NGRAPH_TYPE_TO_MIN_CONST(element::f16);
        NGRAPH_TYPE_TO_MIN_CONST(element::f32);
        NGRAPH_TYPE_TO_MIN_CONST(element::f64);
        NGRAPH_TYPE_TO_MIN_CONST(element::i8);
        NGRAPH_TYPE_TO_MIN_CONST(element::i16);
        NGRAPH_TYPE_TO_MIN_CONST(element::i32);
        NGRAPH_TYPE_TO_MIN_CONST(element::i64);
        NGRAPH_TYPE_TO_MIN_CONST(element::u1);
        NGRAPH_TYPE_TO_MIN_CONST(element::u8);
        NGRAPH_TYPE_TO_MIN_CONST(element::u16);
        NGRAPH_TYPE_TO_MIN_CONST(element::u32);
        NGRAPH_TYPE_TO_MIN_CONST(element::u64);

    case element::undefined:
    case element::dynamic:
    default: return nullptr;
    }
}
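The maximum-of-type constant doubles as the sentinel for a fully dynamic element in bound tensors; equality_mask below compares bounds against it to locate such elements (this reading follows interval_bound_evaluator further down). A one-function sketch:

static void sketch_dynamic_sentinel()
{
    // expected: a scalar i64 Constant holding std::numeric_limits<int64_t>::max();
    // undefined/dynamic element types yield nullptr instead
    auto sentinel = get_constant_max_of_type(element::i64);
}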
HostTensorPtr equality_mask(const HostTensorPtr& tensor, const shared_ptr<op::Constant>& constant)
{
    auto mask = std::make_shared<HostTensor>(element::boolean, tensor->get_shape());
    const auto& param =
        std::make_shared<op::Parameter>(tensor->get_element_type(), tensor->get_shape());
    op::v1::Equal(param, constant, ngraph::op::AutoBroadcastSpec::NUMPY)
        .evaluate({mask}, {tensor, std::make_shared<HostTensor>(constant)});
    return mask;
}

HostTensorPtr or_tensor(const HostTensorPtr& lhs, const HostTensorPtr& rhs)
{
    auto result = std::make_shared<HostTensor>(element::boolean, lhs->get_shape());
    op::v1::LogicalOr(std::make_shared<op::Parameter>(lhs->get_element_type(), lhs->get_shape()),
                      std::make_shared<op::Parameter>(rhs->get_element_type(), rhs->get_shape()),
                      ngraph::op::AutoBroadcastSpec::NUMPY)
        .evaluate({result}, {lhs, rhs});
    return result;
}
bool ngraph::interval_bound_evaluator(const Node* node,
                                      const HostTensorVector& lower_output_values,
                                      const HostTensorVector& upper_output_values)
{
    // TODO: relax for n inputs ?
    NGRAPH_CHECK(lower_output_values.size() == upper_output_values.size());
    NGRAPH_CHECK(node->get_input_size() == 2);

    const auto num_of_outputs = node->get_output_size();
    std::shared_ptr<HostTensor> low_0 = evaluate_lower_bound(node->get_input_source_output(0));
    std::shared_ptr<HostTensor> low_1 = evaluate_lower_bound(node->get_input_source_output(1));
    std::shared_ptr<HostTensor> up_0 = evaluate_upper_bound(node->get_input_source_output(0));
    std::shared_ptr<HostTensor> up_1 = evaluate_upper_bound(node->get_input_source_output(1));
    std::set<HostTensorVector> input_variants = {
        {low_0, low_1}, {low_0, up_1}, {up_0, low_1}, {up_0, up_1}};

    for (const auto& variant_of_input_vector : input_variants)
        for (const auto& input_tensor : variant_of_input_vector)
            if (input_tensor == nullptr)
                return false;

    if (input_variants.size() == 1)
        return node->evaluate(upper_output_values, *input_variants.begin()) &&
               node->evaluate(lower_output_values, *input_variants.begin());

    auto zero = op::v0::Constant::create(element::i64, {1}, {0});
    std::vector<HostTensorVector> unsqueezed_output_variants;
    for (auto& input_variant : input_variants)
    {
        HostTensorVector vector_of_output_variants;
        for (const auto& output : lower_output_values)
            vector_of_output_variants.push_back(std::make_shared<HostTensor>(
                output->get_element_type(), output->get_partial_shape()));

        node->evaluate(vector_of_output_variants, input_variant);

        HostTensorVector vector_of_unsqueezed_output_variants;
        for (const auto& output : vector_of_output_variants)
        {
            if (!output)
                return false;
            auto unsqueezed_shape = output->get_shape();
            unsqueezed_shape.insert(unsqueezed_shape.begin(), 1);
            const auto unsqueezed =
                make_shared<HostTensor>(output->get_element_type(), unsqueezed_shape);
            op::v0::Unsqueeze().evaluate({unsqueezed}, {output, make_shared<HostTensor>(zero)});
            vector_of_unsqueezed_output_variants.push_back(unsqueezed);
        }
        unsqueezed_output_variants.push_back(vector_of_unsqueezed_output_variants);
    }

    auto input_0_maximum_value = get_constant_max_of_type(low_0->get_element_type());
    auto input_1_maximum_value = get_constant_max_of_type(low_1->get_element_type());
    if (input_0_maximum_value == nullptr || input_1_maximum_value == nullptr)
        return false;

    auto input_0_low_dyn_mask = equality_mask(low_0, input_0_maximum_value);
    auto input_0_up_dyn_mask = equality_mask(up_0, input_0_maximum_value);
    auto input_1_low_dyn_mask = equality_mask(low_1, input_1_maximum_value);
    auto input_1_up_dyn_mask = equality_mask(up_1, input_1_maximum_value);

    auto final_input_dyn_mask = or_tensor(or_tensor(input_0_low_dyn_mask, input_0_up_dyn_mask),
                                          or_tensor(input_1_low_dyn_mask, input_1_up_dyn_mask));

    bool fully_defined = true;
    for (size_t i = 0; i < num_of_outputs; ++i)
    {
        HostTensorVector all_variants_for_ith_output;
        for (const auto& unsqueezed_output_variant : unsqueezed_output_variants)
            all_variants_for_ith_output.push_back(unsqueezed_output_variant[i]);

        auto concated_shape = all_variants_for_ith_output[0]->get_shape();
        concated_shape[0] = all_variants_for_ith_output.size();
        auto concated = make_shared<HostTensor>(all_variants_for_ith_output[0]->get_element_type(),
                                                concated_shape);
        auto concat = op::Concat();
        concat.set_axis(0);
        concat.evaluate({concated}, all_variants_for_ith_output);

        auto fake_param = make_shared<op::Parameter>(
            all_variants_for_ith_output[0]->get_element_type(), concated_shape);
        auto reduce_min_op = op::v1::ReduceMin(fake_param, zero, false);
        reduce_min_op.evaluate({lower_output_values[i]}, {concated, make_shared<HostTensor>(zero)});
        auto reduce_max_op = op::v1::ReduceMax(fake_param, zero, false);
        reduce_max_op.evaluate({upper_output_values[i]}, {concated, make_shared<HostTensor>(zero)});

        if (upper_output_values[i] == nullptr)
            fully_defined = false;
        else
        {
            auto output_maximum_value =
                get_constant_max_of_type(upper_output_values[i]->get_element_type());
            op::v1::Select().evaluate({upper_output_values[i]},
                                      {final_input_dyn_mask,
                                       std::make_shared<HostTensor>(output_maximum_value),
                                       upper_output_values[i]});
            node->get_output_tensor(i).set_upper_value(upper_output_values[i]);
        }
        if (lower_output_values[i] == nullptr)
            fully_defined = false;
        else
        {
            auto output_minimum_value =
                op::Constant::create(lower_output_values[i]->get_element_type(), {}, {0});
            // Cannot use get_constant_min_of_type(lower_output_values[i]->get_element_type())
            // here yet
            op::v1::Select().evaluate({lower_output_values[i]},
                                      {final_input_dyn_mask,
                                       std::make_shared<HostTensor>(output_minimum_value),
                                       lower_output_values[i]});
            node->get_output_tensor(i).set_lower_value(lower_output_values[i]);
        }
    }
    return fully_defined;
}
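A hedged sketch that exercises the interval evaluator directly on a two-input arithmetic node: the evaluator runs the op on all four {lower, upper} input combinations, then reduces per element with ReduceMin/ReduceMax as above (whether Multiply itself delegates here is not shown in this hunk):

static void sketch_interval_arithmetic()
{
    auto data = std::make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 4), 3});
    auto shape_of = std::make_shared<op::v3::ShapeOf>(data);
    auto two = op::Constant::create(element::i64, {2}, {2, 2});
    auto mul = std::make_shared<op::v1::Multiply>(shape_of, two);

    HostTensorVector lower{std::make_shared<HostTensor>(element::i64, Shape{2})};
    HostTensorVector upper{std::make_shared<HostTensor>(element::i64, Shape{2})};
    // expected on success: lower holds {4, 6} and upper holds {8, 6}
    bool ok = interval_bound_evaluator(mul.get(), lower, upper);
}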
bool ngraph::host_tensor_is_positive(const HostTensorPtr& bound)
{
    const auto bound_constant = std::make_shared<op::Constant>(bound);
    const auto zero_constant = op::Constant::create(bound->get_element_type(), {1}, {0});
    OutputVector greater(1);
    bool folded = std::make_shared<op::v1::Greater>(bound_constant, zero_constant)
                      ->constant_fold(greater, {bound_constant, zero_constant});
    NGRAPH_CHECK(folded);

    auto axes_vector = std::vector<int64_t>(greater[0].get_shape().size());
    std::iota(axes_vector.begin(), axes_vector.end(), 0);
    const auto axes = op::Constant::create(element::i64, {axes_vector.size()}, axes_vector);
    OutputVector all(1);
    folded = std::make_shared<op::v1::ReduceLogicalAnd>(greater[0], axes)
                 ->constant_fold(all, {greater[0], axes});
    NGRAPH_CHECK(folded && is_type<op::Constant>(all[0].get_node_shared_ptr()));
    const auto result =
        std::dynamic_pointer_cast<op::Constant>(all[0].get_node_shared_ptr())->cast_vector<bool>();
    NGRAPH_CHECK(all[0].get_shape() == Shape{});
    return result[0];
}
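host_tensor_is_positive folds Greater(bound, 0) and reduces it with ReduceLogicalAnd over all axes, so it answers whether every element is strictly positive. A one-function sketch (illustrative values):

static void sketch_positivity_check()
{
    auto bound = std::make_shared<HostTensor>(
        op::Constant::create(element::i64, Shape{3}, std::vector<int64_t>{1, 2, 3}));
    bool strictly_positive = host_tensor_is_positive(bound); // expected: true
}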
bool ngraph::has_and_set_equal_bounds(const Output<Node>& source)
{
    if (op::is_constant(source.get_node_shared_ptr()))
        return true;
    HostTensorPtr lb, ub;
    std::tie(lb, ub) = evaluate_both_bounds(source);
    return lb && lb == ub;
}

shared_ptr<op::Constant> ngraph::get_constant_from_source(const Output<Node>& source)
{
    if (!has_and_set_equal_bounds(source))
        return nullptr;
    if (const auto& c = as_type_ptr<op::Constant>(source.get_node_shared_ptr()))
        return c;
    return std::make_shared<op::Constant>(source.get_tensor().get_upper_value());
}
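get_constant_from_source is the entry point the folding above leans on: when the lower and upper bounds of `source` coincide, the subgraph value is fully known and comes back as a Constant. A minimal sketch, assuming a static ShapeOf yields equal cached bounds (which is what the static_value_propagation test below relies on):

static void sketch_constant_from_source()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    auto shape_of = std::make_shared<op::v3::ShapeOf>(data);

    if (auto folded = get_constant_from_source(shape_of->output(0)))
    {
        // expected: folded->cast_vector<int64_t>() == {1, 2, 3}
    }
}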
@ -517,8 +517,8 @@ TEST(attributes, max_pool_op)
TEST(attributes, mod_op)
{
    NodeBuilder::get_ops().register_factory<opset1::Mod>();
    auto A = make_shared<op::Parameter>(element::f32, Shape{0, 2});
    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 0});
    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2});
    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 1});

    auto auto_broadcast = op::AutoBroadcastType::NUMPY;
@ -22,6 +22,7 @@
#include "util/test_tools.hpp"

#include <memory>
#include <util/type_prop.hpp>

NGRAPH_SUPPRESS_DEPRECATED_START
@ -108,7 +109,7 @@ TEST(build_graph, function_undeclared_parameters)
    }
    catch (const ngraph_error& error)
    {
        EXPECT_EQ(error.what(), std::string("Function references undeclared parameter"));
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Function references undeclared parameter"));
    }
    catch (...)
    {
@ -1996,8 +1996,6 @@ TEST(constant_folding, constant_dyn_reshape_shape_not_originally_constant)
    dyn_reshape->set_friendly_name("test");
    auto f = make_shared<Function>(dyn_reshape, ParameterVector{});

    ASSERT_TRUE(dyn_reshape->get_output_partial_shape(0).is_dynamic());

    pass::Manager pass_manager;
    pass_manager.register_pass<pass::ConstantFolding>();
    pass_manager.run_passes(f);
@ -279,9 +279,7 @@ bool runtime::dynamic::DynamicExecutable::call(
        num_dyn_nodes_last_pass = num_dyn_nodes_this_pass;
    }

    pass::Manager pass_val;
    pass_val.register_pass<pass::Validate>();
    pass_val.run_passes(clone);
    clone->validate_nodes_and_infer_types();

    std::vector<std::shared_ptr<runtime::Tensor>> wrapped_outputs;
@ -90,8 +90,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node)
    ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static());
    ASSERT_TRUE(bc->get_output_partial_shape(0).rank().same_scheme(Rank{4}));
    ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic());
    ASSERT_TRUE(bc->get_output_partial_shape(0).same_scheme(
        PartialShape{Dimension::dynamic(), 16, 50, 50}));
    ASSERT_EQ(bc->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 16, 50, 50}));
}

TYPED_TEST_P(BroadcastTests, broadcast_fail_rank)
@ -108,7 +108,6 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes)
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -213,7 +212,6 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes)
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -316,7 +314,6 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    Shape out0_shape{1};
@ -416,7 +413,6 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -524,7 +520,6 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -586,12 +581,9 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_incorrect_sli
    loop->set_merged_input(M_body, M, Zo);

    const auto sliced_output_axis = 4;
    auto out = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, sliced_output_axis);

    auto result = make_shared<opset5::Result>(out);
    try
    {
        auto f = make_shared<Function>(ResultVector{result}, ParameterVector{X, Y, M});
        auto out = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, sliced_output_axis);
        FAIL() << "Loop was created with incorrect axis of concatenated slices output.";
    }
    catch (const std::exception& error)
@ -690,7 +682,6 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes)
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -796,7 +787,6 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -902,7 +892,6 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -1009,7 +998,6 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs)
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -1121,7 +1109,6 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -1236,7 +1223,6 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<opset5::Result>(out0);
    auto result1 = make_shared<opset5::Result>(out1);
    auto result2 = make_shared<opset5::Result>(out2);
@ -21,6 +21,180 @@
using namespace std;
using namespace ngraph;

TEST(type_prop, static_value_propagation)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto r = make_shared<op::v1::Reshape>(param, shape_of, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{1, 2, 3}));
}

TEST(type_prop, interval_value_propagation)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto r = make_shared<op::v1::Reshape>(param, shape_of, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(1, 8), 2, 3}));

    auto shape_of_opset1 = make_shared<op::v0::ShapeOf>(param);

    auto reshape = make_shared<op::v1::Reshape>(param, shape_of_opset1, false);

    ASSERT_EQ(reshape->get_element_type(), element::f32);
    ASSERT_EQ(reshape->get_output_partial_shape(0), PartialShape({Dimension(1, 8), 2, 3}));
}

TEST(type_prop, static_value_propagation_through_gather)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);
    auto gather = make_shared<op::v1::Gather>(shape_of,
                                              op::Constant::create(element::i64, {3}, {2, 1, 0}),
                                              op::Constant::create(element::i64, {}, {0}));

    auto r = make_shared<op::v1::Reshape>(param, gather, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_shape(), (Shape{3, 2, 1}));
}

TEST(type_prop, interval_value_propagation_through_gather)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);
    auto gather = make_shared<op::v1::Gather>(shape_of,
                                              op::Constant::create(element::i64, {3}, {2, 1, 0}),
                                              op::Constant::create(element::i64, {}, {0}));

    auto r = make_shared<op::v1::Reshape>(param, gather, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({3, 2, Dimension(1, 8)}));
}

TEST(type_prop, interval_value_propagation_through_consecutive_gathers)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);
    auto gather_1 = make_shared<op::v1::Gather>(shape_of,
                                                op::Constant::create(element::i64, {3}, {2, 1, 0}),
                                                op::Constant::create(element::i64, {}, {0}));

    auto gather_2 = make_shared<op::v1::Gather>(gather_1,
                                                op::Constant::create(element::i64, {3}, {1, 2, 0}),
                                                op::Constant::create(element::i64, {}, {0}));

    auto r = make_shared<op::v1::Reshape>(param, gather_2, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({2, Dimension(1, 8), 3}));
}

TEST(type_prop, interval_value_propagation_concatenated_gathers)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto gather_1 = make_shared<op::v1::Gather>(shape_of,
                                                op::Constant::create(element::i64, {}, {2}),
                                                op::Constant::create(element::i64, {}, {0}));
    auto dim_1 = make_shared<op::Unsqueeze>(gather_1, op::Constant::create(element::i64, {1}, {0}));

    auto gather_2 = make_shared<op::v1::Gather>(shape_of,
                                                op::Constant::create(element::i64, {}, {1}),
                                                op::Constant::create(element::i64, {}, {0}));
    auto tmp_dim_2 = make_shared<op::v1::Reshape>(
        gather_2, op::Constant::create(element::i64, {2}, {1, 1}), true);
    auto dim_2 =
        make_shared<op::v0::Squeeze>(tmp_dim_2, op::Constant::create(element::i64, {1}, {0}));

    auto gather_3 = make_shared<op::v1::Gather>(shape_of,
                                                op::Constant::create(element::i64, {}, {0}),
                                                op::Constant::create(element::i64, {}, {0}));
    auto dim_3 = make_shared<op::Unsqueeze>(gather_3, op::Constant::create(element::i64, {1}, {0}));

    auto shape = make_shared<op::Concat>(OutputVector{dim_1, dim_2, dim_3}, 0);
    auto r = make_shared<op::v1::Reshape>(param, shape, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({3, 2, Dimension(1, 8)}));
}

TEST(type_prop, interval_value_propagation_mul_div)
{
    auto param = make_shared<op::Parameter>(element::f32,
                                            PartialShape{Dimension(2, 8), Dimension(4, 16), 2});

    auto shape_of = make_shared<op::v3::ShapeOf>(param);
    auto cast_fp = make_shared<op::Convert>(shape_of, element::f32);
    auto mul = make_shared<op::v1::Multiply>(cast_fp,
                                             op::Constant::create(element::f32, {3}, {-2, 2, -4}));
    auto div =
        make_shared<op::v1::Divide>(mul, op::Constant::create(element::f32, {3}, {-2, 2, -4}));
    auto cast_int = make_shared<op::Convert>(div, element::i32);

    auto r = make_shared<op::v1::Reshape>(param, cast_int, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape({Dimension(2, 8), Dimension(4, 16), 2}));
}

TEST(type_prop, interval_value_propagation_reduce)
{
    auto param = make_shared<op::Parameter>(element::f32, PartialShape{Dimension(1, 8), 2, 3});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);
    auto reduce_prod = make_shared<op::v1::ReduceProd>(
        shape_of, op::Constant::create(element::i64, {1}, {0}), true);
    auto r = make_shared<op::v1::Reshape>(param, reduce_prod, false);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0), PartialShape{Dimension(6, 48)});
}

TEST(type_prop, interval_value_propagation_reshape_zero_special_value)
{
    auto param = make_shared<op::Parameter>(
        element::f32, PartialShape{Dimension(1, 8), Dimension(16, 64), 3, Dimension(200, 400)});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto dim_021 = make_shared<op::v1::Gather>(shape_of,
                                               op::Constant::create(element::i64, {3}, {0, 2, 1}),
                                               op::Constant::create(element::i64, {}, {0}));
    auto dim_3 = op::Constant::create(element::i64, {1}, {0});

    auto shape = make_shared<op::Concat>(OutputVector{dim_021, dim_3}, 0);
    auto r = make_shared<op::v1::Reshape>(param, shape, true);

    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0),
              PartialShape({Dimension(1, 8), 3, Dimension(16, 64), Dimension(200, 400)}));
}

TEST(type_prop, interval_value_propagation_reshape_zero_minus_one_special_values)
{
    auto param = make_shared<op::Parameter>(
        element::f32, PartialShape{Dimension(1, 8), Dimension(16, 64), 6, Dimension(200, 400)});
    auto shape_of = make_shared<op::v3::ShapeOf>(param);

    auto dim_0 = make_shared<op::v1::Gather>(shape_of,
                                             op::Constant::create(element::i64, {1}, {1}),
                                             op::Constant::create(element::i64, {}, {0}));
    auto dim_1 = op::Constant::create(element::i64, {1}, {0});
    auto dim_2 = op::Constant::create(element::i64, {1}, {-1});

    auto shape = make_shared<op::Concat>(OutputVector{dim_0, dim_1, dim_2}, 0);
    auto r = make_shared<op::v1::Reshape>(param, shape, true);
    ASSERT_EQ(r->get_element_type(), element::f32);
    ASSERT_EQ(r->get_output_partial_shape(0),
              PartialShape({Dimension(16, 64), Dimension(16, 64), Dimension(19, 1200)}));
}

TEST(type_prop, reshape_deduce_s2t)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{});
@ -106,7 +106,6 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2)
    // Output 1 is concat of Zos
    // start=0, stride=2, part_size=2, end=39, axis=1
    auto out1 = tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 39, 1);

    auto result0 = make_shared<op::Result>(out0);
    auto result1 = make_shared<op::Result>(out1);
    Shape out0_shape{32, 2, 10};
@ -189,7 +188,6 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic)
            EXPECT_NE(output_desc, nullptr);
        }
    }

    auto result0 = make_shared<op::Result>(out0);
    auto result1 = make_shared<op::Result>(out1);
    Shape out0_shape{32, 2, 10};
@ -47,3 +47,12 @@ TEST(type_prop, tile_few_repeats)
    ASSERT_EQ(top->get_element_type(), element::f32);
    ASSERT_EQ(top->get_shape(), (Shape{6, 32, 10}));
}

TEST(type_prop, tile_few_repeats_dyn_input)
{
    auto param0 = make_shared<op::Parameter>(element::f32, PartialShape{6, Dimension(8, 10), 10});
    auto param1 = op::Constant::create(element::i64, Shape{2}, {4, 1});
    auto top = make_shared<op::v0::Tile>(param0, param1);
    ASSERT_EQ(top->get_element_type(), element::f32);
    ASSERT_EQ(top->get_output_partial_shape(0), (PartialShape{6, Dimension(32, 40), 10}));
}
@ -111,7 +111,7 @@ TYPED_TEST_P(topk_type_prop, topk_rank_static_k_unknown)
    const auto convert_k = make_shared<op::v0::Convert>(k, element::i32);
    const auto topk = make_shared<TypeParam>(data, convert_k, axis, "max", "value");

    const PartialShape ranged_dynamic_axis_shape{1, Dimension{5, 10}, 100};
    const PartialShape ranged_dynamic_axis_shape{1, Dimension{5}, 100};
    EXPECT_EQ(topk->get_output_partial_shape(0), ranged_dynamic_axis_shape);
}
}