Deprecate global element types (#3444)
* Removed global types
* Fixed ONNX importer and nGraph tests
* Fixed code style
* Disable some warnings for Windows
* Try to fix python build
* Extend deprecation message
* Fixed code style
* Fixed comments
parent 2a478841ac
commit 071fb9d1c6
@@ -103,9 +103,9 @@ methods have been decorated with deprecated warnings which may be enabled by set
 To update, remove the passed argument. For example,
 ```C++
 // Old
-make_shared<Parameter>(make_shared<descriptor::TensorViewType>(element::f32, Shape{2, 4}));
+make_shared<Parameter>(make_shared<descriptor::TensorViewType>(element::Type_t::f32, Shape{2, 4}));
 // New (remove TensorViewType)
-make_shared<Parameter>(element::f32, Shape{2, 4});
+make_shared<Parameter>(element::Type_t::f32, Shape{2, 4});

 // Old
 make_shared<Function>(results, result_type, parameters);
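For context, a minimal sketch of the migration this changelog entry describes (the `build_param` helper is illustrative, not part of the patch). The key point, visible throughout the diff below, is that `element::Type` is implicitly constructible from the `element::Type_t` enum, so swapping a deprecated global constant for the enum value is a drop-in change:

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Hypothetical helper showing the before/after of the type migration.
std::shared_ptr<op::Parameter> build_param()
{
    // Old style (deprecated): uses the global constant element::f32.
    // return std::make_shared<op::Parameter>(element::f32, Shape{2, 4});

    // New style: the Type_t enum value converts implicitly to element::Type.
    return std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 4});
}
```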
@@ -169,7 +169,7 @@ namespace ngraph
 std::size_t start_match_axis)
 {
 auto shape_const =
-op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape);
+op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape);
 return std::make_shared<op::v1::Broadcast>(
 value,
 shape_const,

@@ -177,8 +177,8 @@ namespace ngraph

 if (!broadcast_axes.empty())
 {
-auto shape_const =
-op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape);
+auto shape_const = op::Constant::create(
+element::Type_t::u64, Shape{output_shape.size()}, output_shape);
 broadcasted_node = make_shared<op::v1::Broadcast>(
 broadcasted_node,
 shape_const,

@@ -236,8 +236,8 @@ namespace ngraph
 trimmed_value = builder::opset1::reshape(value, trimmed_value_shape);
 }

-auto shape_const =
-op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape);
+auto shape_const = op::Constant::create(
+element::Type_t::u64, Shape{output_shape.size()}, output_shape);
 auto value_bcast = make_shared<op::v1::Broadcast>(
 trimmed_value, shape_const, opset1::get_axes_mapping_output(output_shape, axes));

@@ -354,7 +354,8 @@ namespace ngraph
 iota(begin(axes) + start_match_axis, end(axes), start_match_axis + input_shape.size());

 auto axes_mapping = opset1::get_axes_mapping(output_shape, axes);
-return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping);
+return op::Constant::create(
+element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping);
 }

 namespace opset1

@@ -434,14 +435,15 @@ namespace ngraph
 vector<size_t> mapping(input_shape.size());
 iota(begin(mapping), end(mapping), start_match_axis);

-return op::Constant::create(element::i64, Shape{mapping.size()}, mapping);
+return op::Constant::create(element::Type_t::i64, Shape{mapping.size()}, mapping);
 }

 Output<Node> get_axes_mapping_output(const Shape& output_shape,
 const AxisSet& broadcast_axes)
 {
 vector<size_t> axes_mapping{get_axes_mapping(output_shape, broadcast_axes)};
-return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping);
+return op::Constant::create(
+element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping);
 }

 Output<Node> make_broadcast(const Output<Node>& node,

@@ -450,7 +452,8 @@ namespace ngraph
 {
 return make_shared<op::v1::Broadcast>(
 node,
-op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape),
+op::Constant::create(
+element::Type_t::i64, Shape{target_shape.size()}, target_shape),
 get_axes_mapping_output(target_shape, broadcast_axes));
 }

@@ -460,7 +463,8 @@ namespace ngraph
 {
 return make_shared<op::v1::Broadcast>(
 node,
-op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape),
+op::Constant::create(
+element::Type_t::i64, Shape{target_shape.size()}, target_shape),
 get_axes_mapping_output(target_shape, node.get_shape(), start_match_axis));
 }

@@ -49,10 +49,10 @@ namespace ngraph
 const auto dim_values = std::make_shared<ngraph::opset1::Gather>(
 value_shape,
 reduction_axes,
-ngraph::opset1::Constant::create(element::i64, {}, {0}));
+ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}));

 return std::make_shared<ngraph::opset1::ReduceProd>(
-dim_values, ngraph::opset1::Constant::create(element::i64, {}, {0}));
+dim_values, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}));
 }

 std::shared_ptr<Node> builder::opset1::mean(const Output<Node>& value,

@@ -62,7 +62,7 @@ namespace ngraph
 std::shared_ptr<Node> elems_number;
 const auto value_elem_type = value.get_element_type();
 const auto reduction_axes_const = ngraph::opset1::Constant::create(
-element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector());
+element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector());
 const auto value_elems_sum =
 std::make_shared<ngraph::opset1::ReduceSum>(value, reduction_axes_const, keep_dims);
 if (value.get_partial_shape().is_static())

@@ -109,7 +109,7 @@ namespace ngraph
 diff = std::make_shared<ngraph::opset1::ReduceSum>(
 std::make_shared<ngraph::opset1::Multiply>(diff, diff),
 ngraph::opset1::Constant::create(
-element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
+element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
 false);

 const auto& et = value.get_element_type();

@@ -47,13 +47,13 @@ shared_ptr<Node> builder::opset1::reshape(const Output<Node>& value, const Shape
 auto value_rank = value.get_shape().size();
 AxisVector axes_vector(value_rank);
 std::iota(axes_vector.begin(), axes_vector.end(), 0);
-auto axes = op::Constant::create(element::i64, Shape{value_rank}, axes_vector);
+auto axes = op::Constant::create(element::Type_t::i64, Shape{value_rank}, axes_vector);
 return std::make_shared<op::Squeeze>(value, axes);
 }
 else
 {
 auto out_pattern = op::Constant::create(
-element::i64, Shape{shape.size()}, vector<int64_t>(shape.begin(), shape.end()));
+element::Type_t::i64, Shape{shape.size()}, vector<int64_t>(shape.begin(), shape.end()));

 return make_shared<ngraph::opset1::Reshape>(value, out_pattern, false)
 ->add_provenance_group_members_above({value});

@@ -63,7 +63,7 @@ shared_ptr<Node> builder::opset1::reshape(const Output<Node>& value, const Shape
 shared_ptr<Node> builder::opset1::reorder_axes(const Output<Node>& value, vector<size_t> axes_order)
 {
 const auto axes_order_const =
-op::Constant::create(element::i64,
+op::Constant::create(element::Type_t::i64,
 Shape{axes_order.size()},
 vector<int64_t>(axes_order.begin(), axes_order.end()));
 return make_shared<ngraph::opset1::Transpose>(value, axes_order_const)

@@ -83,7 +83,7 @@ shared_ptr<Node> builder::opset1::transpose(const Output<Node>& value)

 const auto input_rank =
 std::make_shared<ngraph::opset1::ShapeOf>(std::make_shared<ngraph::opset1::ShapeOf>(value));
-const auto neg_one = ngraph::opset1::Constant::create(element::i64, Shape{}, {-1});
+const auto neg_one = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {-1});
 const auto start_node = std::make_shared<ngraph::opset1::Add>(input_rank, neg_one);
 const auto reverse_axes_order =
 std::make_shared<ngraph::opset1::Range>(reshape(start_node, Shape{}), // start

@@ -114,7 +114,7 @@ namespace ngraph
 get_normalized_axis_node(const std::shared_ptr<Node> node_rank, int64_t axis)
 {
 auto axis_node =
-ngraph::opset1::Constant::create(element::i64, Shape{1}, {axis});
+ngraph::opset1::Constant::create(element::Type_t::i64, Shape{1}, {axis});
 // shortcut for alredy positive value
 if (axis >= 0)
 {

@@ -138,11 +138,11 @@ shared_ptr<Node> builder::opset1::flatten(const Output<Node>& value, int axis)
 shared_ptr<Node> output_shape;
 if (axis == 0)
 {
-output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {1, -1});
+output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {1, -1});
 }
 else if (axis == 1)
 {
-output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {0, -1});
+output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {0, -1});
 }
 else
 {

@@ -152,15 +152,15 @@ shared_ptr<Node> builder::opset1::flatten(const Output<Node>& value, int axis)

 const auto first_part_dims = make_shared<ngraph::opset1::StridedSlice>(
 value_shape,
-ngraph::opset1::Constant::create(element::i64, {1}, {0}),
+ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}),
 axis_node,
 vector<int64_t>{},
 vector<int64_t>{});
 const auto first_part_dims_length = make_shared<ngraph::opset1::ReduceProd>(
-first_part_dims, ngraph::opset1::Constant::create(element::i64, {}, {0}), true);
+first_part_dims, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}), true);

 const auto remaining_part_length =
-ngraph::opset1::Constant::create(element::i64, {1}, {-1});
+ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {-1});

 output_shape = make_shared<ngraph::opset1::Concat>(
 OutputVector{first_part_dims_length, remaining_part_length}, 0);

@@ -230,19 +230,21 @@ shared_ptr<Node> builder::opset1::collapse(const Output<Node>& value,
 const auto rank = make_shared<ngraph::opset1::ShapeOf>(shape);

 // Split lengths used in VariadicSplit
-const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis});
-const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1});
+const auto start_axis_node =
+ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {start_axis});
+const auto end_axis_node =
+ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {end_axis + 1});
 const auto collapsed_axis =
 make_shared<ngraph::opset1::Subtract>(end_axis_node, start_axis_node);
 const auto post_axis = make_shared<ngraph::opset1::Subtract>(rank, end_axis_node);

 const auto split_lengths = make_shared<ngraph::opset1::Concat>(
 OutputVector{start_axis_node, collapsed_axis, post_axis}, 0);
-const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0});
+const auto split_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0});
 const auto split_node =
 make_shared<ngraph::opset1::VariadicSplit>(shape, split_axis, split_lengths);

-const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0});
+const auto reduced_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0});
 const auto collapsed_axis_size =
 make_shared<ngraph::opset1::ReduceProd>(split_node->output(1), reduced_axis, true);

@@ -25,9 +25,9 @@ OutputVector builder::opset1::split(const Output<Node>& value,
 const std::vector<size_t>& split_lengths,
 int64_t axis)
 {
-const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis});
-const auto split_lengths_node =
-ngraph::opset1::Constant::create(element::u64, Shape{split_lengths.size()}, split_lengths);
+const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis});
+const auto split_lengths_node = ngraph::opset1::Constant::create(
+element::Type_t::u64, Shape{split_lengths.size()}, split_lengths);
 const auto variadic_split =
 std::make_shared<ngraph::opset1::VariadicSplit>(value, axis_node, split_lengths_node);

@@ -36,7 +36,7 @@ OutputVector builder::opset1::split(const Output<Node>& value,

 OutputVector builder::opset1::split(const Output<Node>& value, size_t num_splits, int64_t axis)
 {
-const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis});
+const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis});
 const auto split = std::make_shared<ngraph::opset1::Split>(value, axis_node, num_splits);

 return split->outputs();

@@ -40,7 +40,7 @@ namespace ngraph
 /// edge of interval. default true = includes right edge
 Bucketize(const Output<Node>& data,
 const Output<Node>& buckets,
-const element::Type output_type = element::i64,
+const element::Type output_type = element::Type_t::i64,
 const bool with_right_bound = true);

 virtual void validate_and_infer_types() override;
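The default-argument change just above works because `element::Type` is implicitly constructible from `element::Type_t`, so `element::Type_t::i64` in a signature behaves exactly as the old `element::i64` global did. A hedged sketch (the free function is invented for illustration):

```C++
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

// Illustrative only: a Type_t default argument materializes an element::Type
// at the call site, just as the deprecated global constant used to.
element::Type pick_output_type(element::Type output_type = element::Type_t::i64)
{
    return output_type;
}
```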
@@ -273,31 +273,31 @@ namespace ngraph
 }

 /// \brief Returns the value of the constant node as a Shape object
-/// Can only be used on element::i64 nodes and interprets
+/// Can only be used on element::Type_t::i64 nodes and interprets
 /// negative values as zeros.
 Shape get_shape_val() const;
 /// \brief Returns the value of the constant node as a Strides
 /// object
-/// Can only be used on element::i64 nodes and interprets
+/// Can only be used on element::Type_t::i64 nodes and interprets
 /// negative values as zeros.
 Strides get_strides_val() const;
 /// \brief Returns the value of the constant node as a Coordinate
 /// object
-/// Can only be used on element::i64 nodes and interprets
+/// Can only be used on element::Type_t::i64 nodes and interprets
 /// negative values as zeros.
 Coordinate get_coordinate_val() const;
 /// \brief Returns the value of the constant node as a
 /// CoordinateDiff object
-/// Can only be used on element::i64 nodes.
+/// Can only be used on element::Type_t::i64 nodes.
 CoordinateDiff get_coordinate_diff_val() const;
 /// \brief Returns the value of the constant node as an AxisVector
 /// object
-/// Can only be used on element::i64 nodes and interprets
+/// Can only be used on element::Type_t::i64 nodes and interprets
 /// negative values as zeros.
 AxisVector get_axis_vector_val() const;
 /// \brief Returns the value of the constant node as an AxisSet
 /// object
-/// Can only be used on element::i64 nodes and interprets
+/// Can only be used on element::Type_t::i64 nodes and interprets
 /// negative values as zeros.
 /// Repeated values are allowed.
 AxisSet get_axis_set_val() const;
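As a quick illustration of the contract these comments document, a sketch (assuming the `op::Constant::create` factory used elsewhere in this diff):

```C++
#include "ngraph/op/constant.hpp"

using namespace ngraph;

// get_shape_val() requires an i64 constant, now spelled with the enum value;
// per the comments above, negative entries would be interpreted as zeros.
Shape constant_to_shape()
{
    auto c = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 4});
    return c->get_shape_val(); // Shape{2, 3, 4}
}
```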
@@ -117,7 +117,7 @@ namespace ngraph
 R,
 B,
 Constant::create(
-element::f32,
+element::Type_t::f32,
 Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL),
 3UL * static_cast<size_t>(hidden_size)},
 std::vector<float>{0.f}),

@@ -125,14 +125,15 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const Output<Node>& iou_threshold,
-const Output<Node>& score_threshold,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const Output<Node>& iou_threshold,
+const Output<Node>& score_threshold,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values for the last
 /// 3 inputs

@@ -143,11 +144,12 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;

@@ -176,7 +178,7 @@ namespace ngraph
 protected:
 BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
 bool m_sort_result_descending = true;
-ngraph::element::Type m_output_type = ngraph::element::i64;
+ngraph::element::Type m_output_type = ngraph::element::Type_t::i64;
 void validate();
 int64_t max_boxes_output_from_input() const;
 };

@@ -205,14 +207,15 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const Output<Node>& iou_threshold,
-const Output<Node>& score_threshold,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const Output<Node>& iou_threshold,
+const Output<Node>& score_threshold,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values for the last
 /// 3 inputs

@@ -223,11 +226,12 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 void validate_and_infer_types() override;

@@ -261,11 +265,12 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values in the last.
 /// 3 inputs.

@@ -278,12 +283,13 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values in the last.
 /// 2 inputs.

@@ -297,13 +303,14 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const Output<Node>& iou_threshold,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const Output<Node>& iou_threshold,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation with default value in the last.
 /// input.

@@ -318,14 +325,15 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const Output<Node>& iou_threshold,
-const Output<Node>& score_threshold,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const Output<Node>& iou_threshold,
+const Output<Node>& score_threshold,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 /// \brief Constructs a NonMaxSuppression operation.
 ///

@@ -340,15 +348,16 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 /// boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(const Output<Node>& boxes,
-const Output<Node>& scores,
-const Output<Node>& max_output_boxes_per_class,
-const Output<Node>& iou_threshold,
-const Output<Node>& score_threshold,
-const Output<Node>& soft_nms_sigma,
-const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-const bool sort_result_descending = true,
-const ngraph::element::Type& output_type = ngraph::element::i64);
+NonMaxSuppression(
+const Output<Node>& boxes,
+const Output<Node>& scores,
+const Output<Node>& max_output_boxes_per_class,
+const Output<Node>& iou_threshold,
+const Output<Node>& score_threshold,
+const Output<Node>& soft_nms_sigma,
+const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+const bool sort_result_descending = true,
+const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;

@@ -382,7 +391,7 @@ namespace ngraph
 protected:
 BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
 bool m_sort_result_descending = true;
-ngraph::element::Type m_output_type = ngraph::element::i64;
+ngraph::element::Type m_output_type = ngraph::element::Type_t::i64;
 void validate();
 };
 } // namespace v5
@@ -74,7 +74,7 @@ namespace ngraph
 const HostTensorVector& inputs) const override;

 protected:
-element::Type m_output_type = element::i64;
+element::Type m_output_type = element::Type_t::i64;
 };
 }
 using v3::NonZero;

@@ -33,7 +33,8 @@ namespace ngraph
 const NodeTypeInfo& get_type_info() const override { return type_info; }
 ScatterNDUpdate() = default;
 /// \param inputs Tensor
-/// \param indices Index tensor: Data type must be `element::i32` or `element::i64`
+/// \param indices Index tensor: Data type must be `element::Type_t::i32` or
+/// `element::Type_t::i64`
 /// \param updates Tensor: Must have same type as inputs
 ScatterNDUpdate(const Output<Node>& inputs,
 const Output<Node>& indices,

@@ -32,7 +32,8 @@ namespace ngraph
 const NodeTypeInfo& get_type_info() const override { return type_info; }
 ShapeOf() = default;
 /// \brief Constructs a shape-of operation.
-ShapeOf(const Output<Node>& arg, const element::Type output_type = element::i64);
+ShapeOf(const Output<Node>& arg,
+const element::Type output_type = element::Type_t::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 virtual std::shared_ptr<Node>

@@ -57,14 +57,14 @@ namespace ngraph
 const int64_t axis,
 const std::string& mode,
 const std::string& sort,
-const element::Type& index_element_type = element::i32);
+const element::Type& index_element_type = element::Type_t::i32);

 TopK(const Output<Node>& data,
 const Output<Node>& k,
 const int64_t axis,
 const Mode mode,
 const SortType sort,
-const element::Type& index_element_type = element::i32);
+const element::Type& index_element_type = element::Type_t::i32);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;

@@ -104,7 +104,7 @@ namespace ngraph
 uint64_t m_normalized_axis;
 Mode m_mode;
 SortType m_sort;
-element::Type m_index_element_type{element::i32};
+element::Type m_index_element_type{element::Type_t::i32};

 virtual size_t read_k_from_constant_node(const std::shared_ptr<Node>& node,
 const element::Type& k_element_type) const;

@@ -146,14 +146,14 @@ namespace ngraph
 const int64_t axis,
 const std::string& mode,
 const std::string& sort,
-const element::Type& index_element_type = element::i32);
+const element::Type& index_element_type = element::Type_t::i32);

 TopK(const Output<Node>& data,
 const Output<Node>& k,
 const int64_t axis,
 const Mode mode,
 const SortType sort,
-const element::Type& index_element_type = element::i32);
+const element::Type& index_element_type = element::Type_t::i32);
 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;
 virtual std::shared_ptr<Node>

@@ -44,7 +44,7 @@ namespace ngraph
 Branch()
 : Pattern(OutputVector{})
 {
-set_output_type(0, element::f32, Shape{});
+set_output_type(0, element::Type_t::f32, Shape{});
 }

 void set_destination(const Output<Node>& destination)

@@ -47,7 +47,7 @@ namespace ngraph
 /// Example:
 /// \code{.cpp}
 /// auto add = a + b; // a and b are op::Parameter in this example
-/// auto label = std::make_shared<pattern::op::Label>(element::f32,
+/// auto label = std::make_shared<pattern::op::Label>(element::Type_t::f32,
 /// Shape{2,2},
 /// nullptr,
 /// OutputVector{add});

@@ -61,7 +61,7 @@ namespace ngraph
 set_output_type(0, type, s);
 }

-explicit Label(const element::Type& type = element::dynamic,
+explicit Label(const element::Type& type = element::Type_t::dynamic,
 const PartialShape& s = PartialShape::dynamic())
 : Label(type, s, [](const Output<Node>&) { return true; }, OutputVector())
 {

@@ -76,10 +76,12 @@ namespace ngraph
 /// because when we reconstruct the new x node, it will see that the shapes are inconsistent
 /// for elementwise add.
 ///
-/// Specialization of element types is also possible: `element::dynamic` can be specialized
+/// Specialization of element types is also possible: `element::Type_t::dynamic` can be
+/// specialized
 /// to a concrete element type or left dynamic; but a concrete element type can only be
-/// specialized to itself (e.g., specialization does not allow you to change `element::i32`
-/// to `element::i64`).
+/// specialized to itself (e.g., specialization does not allow you to change
+/// `element::Type_t::i32`
+/// to `element::Type_t::i64`).
 ///
 /// Finally, it is possible to specialize parameter values. If the ith element of
 /// `parameter_values` is not `nullptr`, and fully static element type and shape has been

@@ -91,11 +91,12 @@ namespace ngraph
 size_t bitwidth() const;
 // The name of this type, the enum name of this type
 const std::string& get_type_name() const;
+bool operator==(const Type_t& other) const;
+bool operator!=(const Type_t& other) const { return !(*this == other); }
 bool operator==(const Type& other) const;
 bool operator!=(const Type& other) const { return !(*this == other); }
 bool operator<(const Type& other) const;
 friend NGRAPH_API std::ostream& operator<<(std::ostream&, const Type&);
 static std::vector<const Type*> get_known_types();

 /// \brief Checks whether this element type is merge-compatible with `t`.
 /// \param t The element type to compare this element type to.
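The `operator==(const Type_t&)` overload above is what lets the rest of this patch compare a `Type` directly against an enum value, e.g. `get_element_type() == element::Type_t::i64`, without first constructing a temporary `Type`. A minimal sketch:

```C++
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

// Compares a Type against the Type_t enum via the new operator== overload.
bool is_i64(const element::Type& t)
{
    return t == element::Type_t::i64;
}
```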
@@ -130,21 +131,50 @@ namespace ngraph

 typedef std::vector<Type> TypeVector;

+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::undefined instead.")
 extern NGRAPH_API const Type undefined;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::dynamic instead.")
 extern NGRAPH_API const Type dynamic;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::boolean instead.")
 extern NGRAPH_API const Type boolean;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::bf16 instead.")
 extern NGRAPH_API const Type bf16;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::f16 instead.")
 extern NGRAPH_API const Type f16;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::f32 instead.")
 extern NGRAPH_API const Type f32;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::f64 instead.")
 extern NGRAPH_API const Type f64;
+NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::i8 instead.")
 extern NGRAPH_API const Type i8;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::i16 instead.")
 extern NGRAPH_API const Type i16;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::i32 instead.")
 extern NGRAPH_API const Type i32;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::i64 instead.")
 extern NGRAPH_API const Type i64;
+NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::u1 instead.")
 extern NGRAPH_API const Type u1;
+NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::u8 instead.")
 extern NGRAPH_API const Type u8;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::u16 instead.")
 extern NGRAPH_API const Type u16;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::u32 instead.")
 extern NGRAPH_API const Type u32;
+NGRAPH_DEPRECATED(
+"This global element type was deprecated. Please use Type_t::u64 instead.")
 extern NGRAPH_API const Type u64;

 template <typename T>
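Taken together, these declarations mean any remaining use of a global constant such as `element::f32` still compiles but can emit the deprecation diagnostic once the warnings are enabled (the changelog above notes they "may be enabled"). A hedged before/after sketch:

```C++
#include "ngraph/type/element_type.hpp"

using namespace ngraph;

void example()
{
    // Deprecated: referencing the global may warn under NGRAPH_DEPRECATED.
    element::Type t_old = element::f32;

    // Preferred: the enum value; Type is implicitly constructible from Type_t.
    element::Type t_new = element::Type_t::f32;

    (void)t_old; // suppress unused-variable warnings in this sketch
    (void)t_new;
}
```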
@@ -49,8 +49,8 @@ namespace ngraph
 input_descs.size() + (cur_iter_idx >= 0 ? !cur_iter_initial_value_exist : 0);
 HostTensorVector inputs_to_body;
 for (int64_t i = 0; i < inputs_count; ++i)
-inputs_to_body.push_back(
-std::make_shared<HostTensor>(element::dynamic, PartialShape::dynamic()));
+inputs_to_body.push_back(std::make_shared<HostTensor>(element::Type_t::dynamic,
+PartialShape::dynamic()));
 if (cur_iter_idx >= 0 && !cur_iter_initial_value_exist)
 {
 const auto& cur_iter = func->get_parameters().at(cur_iter_idx);

@@ -90,12 +90,12 @@ namespace ngraph

 // Get TripCount
 int64_t trip_count = 0;
-if (args[0]->get_element_type() == ngraph::element::i32)
+if (args[0]->get_element_type() == ngraph::element::Type_t::i32)
 {
 auto* trip_count_p = args[0]->get_data_ptr<int32_t>();
 trip_count = trip_count_p[0];
 }
-else if (args[0]->get_element_type() == ngraph::element::i64)
+else if (args[0]->get_element_type() == ngraph::element::Type_t::i64)
 {
 auto* trip_count_p = args[0]->get_data_ptr<int64_t>();
 trip_count = trip_count_p[0];

@@ -204,10 +204,10 @@ namespace ngraph
 {
 const auto& cur_iter_param = func->get_parameters().at(cur_iter_idx);
 int64_t iter_num = cur_iter + 1;
-if (cur_iter_param->get_element_type() == element::i64)
+if (cur_iter_param->get_element_type() == element::Type_t::i64)
 inputs_to_body.at(cur_iter_idx)
 ->write(&iter_num, cur_iter_param->get_element_type().size());
-else if (cur_iter_param->get_element_type() == element::i32)
+else if (cur_iter_param->get_element_type() == element::Type_t::i32)
 {
 int32_t iter_num_i32 = static_cast<int32_t>(iter_num);
 inputs_to_body.at(cur_iter_idx)

@@ -326,7 +326,7 @@ namespace ngraph

 size_t selected_size = valid_outputs * 3;

-if (output_type == ngraph::element::i64)
+if (output_type == ngraph::element::Type_t::i64)
 {
 int64_t* indices_ptr = outputs[0]->get_data_ptr<int64_t>();
 memcpy(indices_ptr, selected_indices.data(), selected_size * sizeof(int64_t));

@@ -381,7 +381,7 @@ namespace ngraph
 return;
 }

-if (output_type == ngraph::element::i64)
+if (output_type == ngraph::element::Type_t::i64)
 {
 int64_t* valid_outputs_ptr = outputs[2]->get_data_ptr<int64_t>();
 *valid_outputs_ptr = valid_outputs;

@@ -35,8 +35,8 @@ namespace ngraph
 {
 HostTensorVector inputs_to_body;
 for (int64_t i = 0; i < input_descs.size(); ++i)
-inputs_to_body.push_back(
-std::make_shared<HostTensor>(element::dynamic, PartialShape::dynamic()));
+inputs_to_body.push_back(std::make_shared<HostTensor>(element::Type_t::dynamic,
+PartialShape::dynamic()));

 // Port map processing: inputs and back edges
 struct BackEdge

@@ -586,7 +586,7 @@ std::shared_ptr<Node> ngraph::make_zero(const element::Type& element_type, const
 if (shape.size() > 0)
 {
 return std::make_shared<op::v1::Broadcast>(
-zero, op::Constant::create(element::u64, Shape{shape.size()}, shape));
+zero, op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape));
 }
 return zero;
 }

@@ -213,8 +213,8 @@ descriptor::Output& Node::get_output_descriptor(size_t position)
 while (m_outputs.size() <= position)
 {
 size_t i = m_outputs.size();
-auto tensor_descriptor =
-make_shared<descriptor::Tensor>(element::dynamic, PartialShape::dynamic(), this, i);
+auto tensor_descriptor = make_shared<descriptor::Tensor>(
+element::Type_t::dynamic, PartialShape::dynamic(), this, i);
 m_outputs.emplace_back(this, i, tensor_descriptor);
 }
 return m_outputs.at(position);

@@ -260,7 +260,7 @@ op::v1::Broadcast::Broadcast(const Output<Node>& arg,
 const AutoBroadcastSpec& broadcast_spec)
 : util::BroadcastBase{arg,
 target_shape,
-op::v0::Constant::create(element::u8, Shape{}, {0})->output(0),
+op::v0::Constant::create(element::Type_t::u8, Shape{}, {0})->output(0),
 to_broadcast_mode(broadcast_spec)}
 , m_broadcast_spec{broadcast_spec}
 {

@@ -45,7 +45,8 @@ void op::v3::Bucketize::validate_and_infer_types()
 const PartialShape& buckets_pshape = get_input_partial_shape(1);

 NODE_VALIDATION_CHECK(this,
-m_output_type == element::i64 || m_output_type == element::i32,
+m_output_type == element::Type_t::i64 ||
+m_output_type == element::Type_t::i32,
 "Output type must be i32 or i64. Default is i64");

 if (buckets_pshape.is_static())

@@ -50,7 +50,7 @@ void op::Concat::validate_and_infer_types()
 NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required.");

 PartialShape inputs_shape_scheme{PartialShape::dynamic()};
-element::Type inputs_et{element::dynamic};
+element::Type inputs_et{element::Type_t::dynamic};
 Dimension concatenation_axis_output_dim{0};

 for (uint64_t i = 0; i < get_input_size(); i++)

@@ -482,7 +482,7 @@ Shape op::Constant::get_shape_val() const

 Strides op::Constant::get_strides_val() const
 {
-NGRAPH_CHECK(m_element_type == element::i64);
+NGRAPH_CHECK(m_element_type == element::Type_t::i64);
 std::vector<int64_t> out_strides = cast_vector<int64_t>();
 Strides output_strides(shape_size(m_shape));
 std::transform(out_strides.begin(),

@@ -494,7 +494,7 @@ Strides op::Constant::get_strides_val() const

 Coordinate op::Constant::get_coordinate_val() const
 {
-NGRAPH_CHECK(m_element_type == element::i64);
+NGRAPH_CHECK(m_element_type == element::Type_t::i64);
 std::vector<int64_t> out_coordinate = cast_vector<int64_t>();
 Coordinate output_coordinate(shape_size(m_shape));
 std::transform(out_coordinate.begin(),

@@ -506,7 +506,7 @@ Coordinate op::Constant::get_coordinate_val() const

 CoordinateDiff op::Constant::get_coordinate_diff_val() const
 {
-NGRAPH_CHECK(m_element_type == element::i64);
+NGRAPH_CHECK(m_element_type == element::Type_t::i64);
 std::vector<int64_t> out_coordinate_diff = cast_vector<int64_t>();
 CoordinateDiff output_coordinate_diff(shape_size(m_shape));
 std::transform(out_coordinate_diff.begin(),
@@ -37,7 +37,7 @@ op::v0::CumSum::CumSum(const Output<Node>& arg,
 }

 op::v0::CumSum::CumSum(const Output<Node>& arg, const bool exclusive, const bool reverse)
-: Op({arg, op::Constant::create(element::i32, Shape{}, {0})})
+: Op({arg, op::Constant::create(element::Type_t::i32, Shape{}, {0})})
 , m_exclusive(exclusive)
 , m_reverse(reverse)
 {

@@ -65,7 +65,7 @@ void op::v0::CumSum::validate_and_infer_types()

 const auto& axis_type = get_input_element_type(1);
 NODE_VALIDATION_CHECK(this,
-axis_type == element::i32 || axis_type == element::i64,
+axis_type == element::Type_t::i32 || axis_type == element::Type_t::i64,
 "axis element type must be either int64_t or int32_t but got (",
 axis_type,
 ").");

@@ -49,11 +49,11 @@ void op::DetectionOutput::validate_and_infer_types()
 {
 auto box_logits_shape = get_input_partial_shape(0).to_shape();
 set_output_type(
-0, element::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7});
+0, element::Type_t::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7});
 }
 else
 {
-set_output_type(0, element::f32, PartialShape::dynamic());
+set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
 }
 }

@@ -56,18 +56,18 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output<Node>& emb_table
 void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
 {
 NODE_VALIDATION_CHECK(this,
-get_input_element_type(SEGMENT_IDS) == element::i64 ||
-get_input_element_type(SEGMENT_IDS) == element::i32,
+get_input_element_type(SEGMENT_IDS) == element::Type_t::i64 ||
+get_input_element_type(SEGMENT_IDS) == element::Type_t::i32,
 "SEGMENT_IDS type must be i32 or i64");

 NODE_VALIDATION_CHECK(this,
-get_input_element_type(INDICES) == element::i64 ||
-get_input_element_type(INDICES) == element::i32,
+get_input_element_type(INDICES) == element::Type_t::i64 ||
+get_input_element_type(INDICES) == element::Type_t::i32,
 "INDICES type must be i32 or i64");

 NODE_VALIDATION_CHECK(this,
-get_input_element_type(NUM_SEGMENTS) == element::i64 ||
-get_input_element_type(NUM_SEGMENTS) == element::i32,
+get_input_element_type(NUM_SEGMENTS) == element::Type_t::i64 ||
+get_input_element_type(NUM_SEGMENTS) == element::Type_t::i32,
 "NUM_SEGMENTS type must be i32 or i64");

 NODE_VALIDATION_CHECK(

@@ -110,8 +110,8 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
 if (get_input_size() >= 5)
 {
 NODE_VALIDATION_CHECK(this,
-get_input_element_type(DEFAULT_INDEX) == element::i64 ||
-get_input_element_type(DEFAULT_INDEX) == element::i32,
+get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 ||
+get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32,
 "DEFAULT_INDEX type must be i32 or i64");

 NODE_VALIDATION_CHECK(

@@ -65,7 +65,7 @@ namespace equal
 const op::AutoBroadcastSpec& broadcast_spec)
 {
 bool rc = true;
-out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
+out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
 switch (arg0->get_element_type())
 {
 TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);

@@ -134,7 +134,7 @@ OutputVector op::FakeQuantize::decompose_op() const
 const auto dequant_scale = (output_high - output_low) / levels_minus_one;

 // zero_point type needs to match the quantization output type
-const auto zero_point = Constant::create(element::i32, data.get_shape(), {0.0});
+const auto zero_point = Constant::create(element::Type_t::i32, data.get_shape(), {0.0});
 const auto axes = get_default_order(input_data_shape);

 // clip the input data to the range <input_low;input_high>

@@ -148,7 +148,7 @@ OutputVector op::FakeQuantize::decompose_op() const
 make_shared<op::Quantize>(data,
 quant_scale,
 zero_point,
-element::i32,
+element::Type_t::i32,
 axes,
 op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);

@@ -167,7 +167,7 @@ namespace gather

 out->set_shape(out_shape);

-if (arg1->get_element_type() == element::i64)
+if (arg1->get_element_type() == element::Type_t::i64)
 {
 runtime::reference::gather<T, int64_t>(arg0->get_data_ptr<ET>(),
 arg1->get_data_ptr<int64_t>(),

@@ -177,7 +177,7 @@ namespace gather
 out->get_shape(),
 axis);
 }
-else if (arg1->get_element_type() == element::i32)
+else if (arg1->get_element_type() == element::Type_t::i32)
 {
 runtime::reference::gather<T, int32_t>(arg0->get_data_ptr<ET>(),
 arg1->get_data_ptr<int32_t>(),

@@ -280,7 +280,7 @@ namespace gather
 if (indices_shape.empty())
 {
 // gathering a scalar
-const auto axes = op::Constant::create(element::i64, Shape{1}, {0});
+const auto axes = op::Constant::create(element::Type_t::i64, Shape{1}, {0});
 gathered = make_shared<op::v0::Squeeze>(gathered_concat_input, axes);
 }

@@ -65,7 +65,7 @@ namespace greaterop
 const op::AutoBroadcastSpec& broadcast_spec)
 {
 bool rc = true;
-out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
+out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
 switch (arg0->get_element_type())
 {
 TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);

@@ -65,7 +65,7 @@ namespace greater_equalop
 const op::AutoBroadcastSpec& broadcast_spec)
 {
 bool rc = true;
-out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
+out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
 switch (arg0->get_element_type())
 {
 TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);

@@ -78,7 +78,7 @@ OutputVector op::GRN::decompose_op() const
 data = builder::opset1::reshape(data, data_shape);
 }

-const auto axis_set_const = op::Constant::create(element::i64, {}, {1});
+const auto axis_set_const = op::Constant::create(element::Type_t::i64, {}, {1});
 // Calculate l2 norm across channels.
 shared_ptr<Node> norm = builder::opset1::l2_norm(data, axis_set_const, m_bias);
 // Get back reduced axis.

@@ -119,7 +119,7 @@ void op::v3::GRUCell::validate_and_infer_types()
 }
 auto merged_batch_size = Dimension::dynamic();
 auto merged_hidden_size = Dimension::dynamic();
-auto result_et = element::dynamic;
+element::Type result_et = element::Type_t::dynamic;

 // Get input partial shape for all inputs
 const auto& x_pshape = get_input_partial_shape(0);

@@ -74,7 +74,7 @@ void op::v5::GRUSequence::validate_and_infer_types()
 auto merged_batch_size = Dimension::dynamic();
 auto merged_hidden_size = Dimension::dynamic();
 auto merged_num_directions = Dimension::dynamic();
-auto result_et = element::dynamic;
+element::Type result_et = element::Type_t::dynamic;

 auto x_pshape = get_input_partial_shape(0);
 auto ht_pshape = get_input_partial_shape(1);

@@ -221,8 +221,8 @@ void op::v4::Interpolate::validate_and_infer_types()
 {
 element::Type input_et = get_input_element_type(0);
 NODE_VALIDATION_CHECK(this,
-input_et == element::f32 || input_et == element::f16 ||
-input_et == element::i8,
+input_et == element::Type_t::f32 || input_et == element::Type_t::f16 ||
+input_et == element::Type_t::i8,
 "Input element type must be f32, f16, or i8");

 PartialShape input_shape = PartialShape(get_input_partial_shape(0));

@@ -65,7 +65,7 @@ namespace lessop
 const op::AutoBroadcastSpec& broadcast_spec)
 {
 bool rc = true;
-out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
+out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
 switch (arg0->get_element_type())
 {
 TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);

@@ -65,7 +65,7 @@ namespace less_equalop
 const op::AutoBroadcastSpec& broadcast_spec)
 {
 bool rc = true;
-out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
+out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
 switch (arg0->get_element_type())
 {
 TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
@ -25,7 +25,7 @@ using namespace ngraph;
|
||||
constexpr NodeTypeInfo op::LRN::type_info;
|
||||
|
||||
op::LRN::LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size)
|
||||
: LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size)
|
||||
: LRN(arg, op::Constant::create(element::Type_t::i64, Shape{1}, {1}), alpha, beta, bias, size)
|
||||
{
|
||||
add_provenance_group_member(input_value(1).get_node_shared_ptr());
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ void op::v0::LSTMCell::validate_and_infer_types()
|
||||
|
||||
auto merged_batch_size = Dimension::dynamic();
|
||||
auto merged_hidden_size = Dimension::dynamic();
|
||||
auto result_et = element::dynamic;
|
||||
element::Type result_et = element::Type_t::dynamic;
|
||||
|
||||
// Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information
|
||||
// for further validation
|
||||
@ -457,7 +457,7 @@ void op::v4::LSTMCell::validate_and_infer_types()
|
||||
}
|
||||
auto merged_batch_size = Dimension::dynamic();
|
||||
auto merged_hidden_size = Dimension::dynamic();
|
||||
auto result_et = element::dynamic;
|
||||
element::Type result_et = element::Type_t::dynamic;
|
||||
|
||||
// Get input partial shape for all inputs
|
||||
const auto& x_pshape = get_input_partial_shape(0);
|
||||
|
@ -131,8 +131,10 @@ shared_ptr<Node> op::v0::LSTMSequence::get_masked_node(const Output<Node>& data,
|
||||
|
||||
// Create predicate nodes. The condition is whether current time step value
|
||||
// is greater than sequence length for respective batch inputs.
|
||||
shared_ptr<Node> curr_time_step_node = opset1::Constant::create(
|
||||
element::i32, data.get_shape(), vector<int32_t>(shape_size(data.get_shape()), time_step));
|
||||
shared_ptr<Node> curr_time_step_node =
|
||||
opset1::Constant::create(element::Type_t::i32,
|
||||
data.get_shape(),
|
||||
vector<int32_t>(shape_size(data.get_shape()), time_step));
|
||||
|
||||
Output<Node> batch_seq_length = builder::opset1::legacy_broadcast_for_binary_operation(
|
||||
curr_time_step_node, input_value(3).get_node_shared_ptr(), batch_axis);
|
||||
@ -270,7 +272,7 @@ void op::v0::LSTMSequence::validate_and_infer_types()
|
||||
auto merged_batch_size = Dimension::dynamic();
|
||||
auto merged_hidden_size = Dimension::dynamic();
|
||||
auto merged_num_directions = Dimension::dynamic();
|
||||
auto result_et = element::dynamic;
|
||||
element::Type result_et = element::Type_t::dynamic;
|
||||
|
||||
// Copy all inputs without peephole and initial_cell_state information for further validation
|
||||
for (size_t i = 0; i < get_input_size() - 1; i++)
|
||||
@ -468,7 +470,7 @@ void op::v5::LSTMSequence::validate_and_infer_types()
|
||||
auto merged_batch_size = Dimension::dynamic();
|
||||
auto merged_hidden_size = Dimension::dynamic();
|
||||
auto merged_num_directions = Dimension::dynamic();
|
||||
auto result_et = element::dynamic;
|
||||
element::Type result_et = element::Type_t::dynamic;
|
||||
|
||||
// Copy all inputs without initial_cell_state information for further validation
|
||||
for (size_t i = 0; i < get_input_size(); i++)
|
||||
|
@ -52,8 +52,9 @@ OutputVector op::v1::Mod::decompose_op() const
|
||||
const auto divisor = make_shared<op::Abs>(input_value(1));
|
||||
|
||||
// truncated(a / b)
|
||||
auto division = make_shared<op::Convert>(
|
||||
make_shared<op::v1::Divide>(dividend, divisor, m_auto_broadcast), ngraph::element::i64);
|
||||
auto division =
|
||||
make_shared<op::Convert>(make_shared<op::v1::Divide>(dividend, divisor, m_auto_broadcast),
|
||||
ngraph::element::Type_t::i64);
|
||||
division = make_shared<op::Convert>(division, dividend_et);
|
||||
// truncated(a / b) * b
|
||||
const auto multiplication = make_shared<op::v1::Multiply>(division, divisor, m_auto_broadcast);
|
||||
|
@ -52,9 +52,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression(
const bool sort_result_descending)
: Op({boxes,
scores,
op::Constant::create(element::i64, Shape{}, {0}),
op::Constant::create(element::f32, Shape{}, {.0f}),
op::Constant::create(element::f32, Shape{}, {.0f})})
op::Constant::create(element::Type_t::i64, Shape{}, {0}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f})})
, m_box_encoding{box_encoding}
, m_sort_result_descending{sort_result_descending}
{

@ -71,13 +71,13 @@ std::shared_ptr<Node>

const auto& arg2 = new_args.size() > 2
? new_args.at(2)
: ngraph::op::Constant::create(element::i32, Shape{}, {0});
: ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
const auto& arg3 = new_args.size() > 3
? new_args.at(3)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
const auto& arg4 = new_args.size() > 4
? new_args.at(4)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});

return std::make_shared<op::v1::NonMaxSuppression>(
new_args.at(0), new_args.at(1), arg2, arg3, arg4, m_box_encoding, m_sort_result_descending);

@ -98,7 +98,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
// the spec doesn't say what exact type should be used for the output of this op
// that's why we're setting it to 64-bit integer to provide the maximum range of values support
// this will be changed (configurable) in the next version of this op
const auto& output_element_type = element::i64;
const auto& output_element_type = element::Type_t::i64;

// NonMaxSuppression produces triplets
// that have the following format: [batch_index, class_index, box_index]

@ -249,9 +249,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression(
const element::Type& output_type)
: Op({boxes,
scores,
op::Constant::create(element::i64, Shape{}, {0}),
op::Constant::create(element::f32, Shape{}, {.0f}),
op::Constant::create(element::f32, Shape{}, {.0f})})
op::Constant::create(element::Type_t::i64, Shape{}, {0}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f})})
, m_box_encoding{box_encoding}
, m_sort_result_descending{sort_result_descending}
, m_output_type{output_type}

@ -269,13 +269,13 @@ std::shared_ptr<Node>

const auto& arg2 = new_args.size() > 2
? new_args.at(2)
: ngraph::op::Constant::create(element::i32, Shape{}, {0});
: ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
const auto& arg3 = new_args.size() > 3
? new_args.at(3)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
const auto& arg4 = new_args.size() > 4
? new_args.at(4)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});

return std::make_shared<op::v3::NonMaxSuppression>(new_args.at(0),
new_args.at(1),

@ -301,7 +301,8 @@ void op::v3::NonMaxSuppression::validate()
const auto scores_ps = get_input_partial_shape(1);

NODE_VALIDATION_CHECK(this,
m_output_type == element::i64 || m_output_type == element::i32,
m_output_type == element::Type_t::i64 ||
m_output_type == element::Type_t::i32,
"Output type must be i32 or i64");

if (boxes_ps.is_dynamic() || scores_ps.is_dynamic())

@ -468,9 +469,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression(
const element::Type& output_type)
: op::v3::NonMaxSuppression(boxes,
scores,
op::Constant::create(element::i64, Shape{}, {0}),
op::Constant::create(element::f32, Shape{}, {.0f}),
op::Constant::create(element::f32, Shape{}, {.0f}),
op::Constant::create(element::Type_t::i64, Shape{}, {0}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
box_encoding,
sort_result_descending,
output_type)

@ -488,13 +489,13 @@ std::shared_ptr<Node>

const auto& arg2 = new_args.size() > 2
? new_args.at(2)
: ngraph::op::Constant::create(element::i32, Shape{}, {0});
: ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
const auto& arg3 = new_args.size() > 3
? new_args.at(3)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
const auto& arg4 = new_args.size() > 4
? new_args.at(4)
: ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
: ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});

return std::make_shared<op::v4::NonMaxSuppression>(new_args.at(0),
new_args.at(1),

@ -693,7 +694,7 @@ namespace

inline bool is_float_type_admissible(const element::Type& t)
{
return t == element::f32 || t == element::f16 || t == element::bf16;
return t == element::Type_t::f32 || t == element::Type_t::f16 || t == element::Type_t::bf16;
}

inline bool is_scalar_or_1d_tensor_with_1_element(const PartialShape& p)

@ -715,7 +716,8 @@ void op::v5::NonMaxSuppression::validate()
const auto scores_ps = get_input_partial_shape(1);

NODE_VALIDATION_CHECK(this,
m_output_type == element::i64 || m_output_type == element::i32,
m_output_type == element::Type_t::i64 ||
m_output_type == element::Type_t::i32,
"Output type must be i32 or i64");

if (boxes_ps.is_dynamic() || scores_ps.is_dynamic())

@ -920,7 +922,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types()
}

set_output_type(0, m_output_type, out_shape);
set_output_type(1, element::f32, out_shape);
set_output_type(1, element::Type_t::f32, out_shape);
set_output_type(2, m_output_type, Shape{1});
}
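A usage sketch for the constructors and checks above, assuming the v3 signature shown in this hunk (boxes, scores, box encoding, sort flag, output type); the shapes are illustrative only:

```C++
auto boxes = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 10, 4});
auto scores = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 1, 10});
auto nms = std::make_shared<op::v3::NonMaxSuppression>(
    boxes,
    scores,
    op::v3::NonMaxSuppression::BoxEncodingType::CORNER,
    true,                   // sort_result_descending
    element::Type_t::i32);  // must be i32 or i64, or validate() rejects it
```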
@ -62,7 +62,8 @@ void op::v3::NonZero::validate_and_infer_types()
"NonZero input data type needs to be a numeric type. Got: ",
input_et);
NODE_VALIDATION_CHECK(this,
m_output_type == element::i64 || m_output_type == element::i32,
m_output_type == element::Type_t::i64 ||
m_output_type == element::Type_t::i32,
"Output type must be i32 or i64");

// For scalar non-zero value case, onnx test case expects output shape {1, 1}

@ -65,7 +65,7 @@ namespace not_equalop
const op::AutoBroadcastSpec& broadcast_spec)
{
bool rc = true;
out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
switch (arg0->get_element_type())
{
TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);

@ -72,14 +72,14 @@ void op::PriorBox::validate_and_infer_types()
auto layer_shape = const_shape->get_shape_val();

set_output_type(0,
element::f32,
element::Type_t::f32,
Shape{2,
4 * layer_shape[0] * layer_shape[1] *
static_cast<size_t>(number_of_priors(m_attrs))});
}
else
{
set_output_type(0, element::f32, PartialShape::dynamic());
set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
}
}

@ -80,11 +80,11 @@ void op::PriorBoxClustered::validate_and_infer_types()
// {Prior boxes, variances-adjusted prior boxes}
const auto num_priors = m_attrs.widths.size();
set_output_type(
0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
0, element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
}
else
{
set_output_type(0, element::f32, PartialShape::dynamic());
set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
}
}

@ -363,7 +363,7 @@ void op::v0::Range::validate_and_infer_types()
set_input_is_relevant_to_shape(1);
set_input_is_relevant_to_shape(2);

auto result_et = element::dynamic;
element::Type result_et = element::Type_t::dynamic;

NODE_VALIDATION_CHECK(
this,

@ -373,7 +373,7 @@ void op::v0::Range::validate_and_infer_types()
"Element types for start, stop, and step do not match.");

NODE_VALIDATION_CHECK(this,
result_et != element::boolean,
result_et != element::Type_t::boolean,
"Element type for start, stop, and step, must not be boolean.");

NODE_VALIDATION_CHECK(
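The `result_et` hunks above (and the matching RNN ones below) swap `auto` for an explicit `element::Type`. That is not cosmetic: with the global `element::dynamic` constant deprecated, the initializer is now a bare `element::Type_t` enumerator, and `auto` would deduce the enum instead of the wrapper class. A sketch of the difference (not part of the commit):

```C++
#include <type_traits>

auto a = element::Type_t::dynamic;          // deduces element::Type_t, a plain enum
element::Type b = element::Type_t::dynamic; // converts to the element::Type wrapper
// Only b carries the element::Type API used in the validation code, e.g. merge():
static_assert(std::is_same<decltype(a), element::Type_t>::value, "a is the enum");
static_assert(std::is_same<decltype(b), element::Type>::value, "b is the wrapper");
```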
@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
const auto& axes = inputs[1];
const auto& out = outputs[0];

if (data->get_element_type() != element::boolean ||
if (data->get_element_type() != element::Type_t::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;

@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
const auto& axes = inputs[1];
const auto& out = outputs[0];

if (data->get_element_type() != element::boolean ||
if (data->get_element_type() != element::Type_t::boolean ||
!axes->get_element_type().is_integral_number())
{
return false;

@ -59,7 +59,7 @@ void op::v1::Reverse::validate_and_infer_types()
if (m_mode == Mode::MASK)
{
NODE_VALIDATION_CHECK(this,
get_input_element_type(1) == element::boolean,
get_input_element_type(1) == element::Type_t::boolean,
"In 'mask' mode the second input must contain boolean values.");
}

@ -92,7 +92,7 @@ void op::v0::RNNCell::validate_and_infer_types()
}
auto merged_batch_size = Dimension::dynamic();
auto merged_hidden_size = Dimension::dynamic();
auto result_et = element::dynamic;
element::Type result_et = element::Type_t::dynamic;

// Get input partial shape for all inputs
const auto& x_pshape = get_input_partial_shape(0);

@ -71,7 +71,7 @@ void op::v5::RNNSequence::validate_and_infer_types()
auto merged_batch_size = Dimension::dynamic();
auto merged_hidden_size = Dimension::dynamic();
auto merged_num_directions = Dimension::dynamic();
auto result_et = element::dynamic;
element::Type result_et = element::Type_t::dynamic;

auto x_pshape = get_input_partial_shape(0);
auto ht_pshape = get_input_partial_shape(1);

@ -46,7 +46,7 @@ void op::v1::Select::validate_and_infer_types()
// Condition element type check
NODE_VALIDATION_CHECK(this,
get_input_element_type(0).is_dynamic() ||
get_input_element_type(0) == element::boolean,
get_input_element_type(0) == element::Type_t::boolean,
"Argument 0 must have boolean element type (element type: ",
get_input_element_type(0),
").");

@ -184,7 +184,7 @@ void op::v0::Select::validate_and_infer_types()
{
NODE_VALIDATION_CHECK(this,
get_input_element_type(0).is_dynamic() ||
get_input_element_type(0) == element::boolean,
get_input_element_type(0) == element::Type_t::boolean,
"Argument 0 must have boolean element type (element type: ",
get_input_element_type(0),
").");

@ -42,7 +42,8 @@ op::v3::ShapeOf::ShapeOf(const Output<Node>& arg, element::Type output_type)
void op::v3::ShapeOf::validate_and_infer_types()
{
NODE_VALIDATION_CHECK(this,
m_output_type == element::i64 || m_output_type == element::i32,
m_output_type == element::Type_t::i64 ||
m_output_type == element::Type_t::i32,
"Output type must be i32 or i64");
set_input_is_relevant_to_value(0, false);
set_output_type(0, m_output_type, PartialShape{get_input_partial_shape(0).rank()});

@ -141,7 +142,7 @@ namespace shape_of
auto index = std::make_shared<op::v0::Constant>(
output_type, Shape{1}, std::vector<int64_t>{i});
auto axis = std::make_shared<op::v0::Constant>(
element::i64, Shape{}, std::vector<int64_t>{0});
element::Type_t::i64, Shape{}, std::vector<int64_t>{0});
auto temp = make_shared<op::v1::Gather>(shape_of, index, axis);
temp->set_friendly_name("DynDim/" + temp->get_name());
dimensions.push_back(temp);

@ -182,7 +183,7 @@ op::v0::ShapeOf::ShapeOf(const Output<Node>& arg)
void op::v0::ShapeOf::validate_and_infer_types()
{
set_input_is_relevant_to_value(0, false);
set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()});
set_output_type(0, element::Type_t::i64, PartialShape{get_input_partial_shape(0).rank()});
}

bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor)

@ -126,7 +126,7 @@ OutputVector op::Squeeze::decompose_op() const
auto output_data_shape = get_output_shape(0);
return {make_shared<op::v1::Reshape>(
data,
op::Constant::create(element::u64, {output_data_shape.size()}, output_data_shape),
op::Constant::create(element::Type_t::u64, {output_data_shape.size()}, output_data_shape),
false)};
}

@ -77,12 +77,13 @@ namespace
{
NGRAPH_CHECK(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1,
"Begin input must be 1D");
return std::make_shared<op::v1::Broadcast>(op::Constant::create(element::i64, {}, {1}),
std::make_shared<op::ShapeOf>(begin));
return std::make_shared<op::v1::Broadcast>(
op::Constant::create(element::Type_t::i64, {}, {1}),
std::make_shared<op::ShapeOf>(begin));
}

return op::Constant::create(
element::i64, Shape{strides_length}, vector<int64_t>(strides_length, 1));
element::Type_t::i64, Shape{strides_length}, vector<int64_t>(strides_length, 1));
}
}

@ -320,8 +320,9 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr<Node>& node,
const element::Type& k_element_type) const
{
NODE_VALIDATION_CHECK(this,
k_element_type == element::i8 || k_element_type == element::i32 ||
k_element_type == element::i64,
k_element_type == element::Type_t::i8 ||
k_element_type == element::Type_t::i32 ||
k_element_type == element::Type_t::i64,
"K input element type must be i8, i32 or i64 (got ",
k_element_type,
").");

@ -400,7 +401,7 @@ size_t op::v1::TopK::get_k() const
void op::v1::TopK::set_k(size_t k)
{
this->input(1).replace_source_output(
op::Constant::create(element::i64, Shape{}, {k})->output(0));
op::Constant::create(element::Type_t::i64, Shape{}, {k})->output(0));
}

bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const

@ -29,7 +29,7 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output<Node>& arg,
const AxisSet& reduction_axes)
: Op({arg,
op::Constant::create(
element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
->output(0)})
{
add_provenance_group_member(input_value(1).get_node_shared_ptr());

@ -62,9 +62,10 @@ const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const

void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_axes)
{
this->input(1).replace_source_output(
op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
->output(0));
this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64,
Shape{reduction_axes.size()},
reduction_axes.to_vector())
->output(0));
}

void op::util::ArithmeticReduction::validate_and_infer_types()
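A usage sketch for the reformatted `set_reduction_axes` above: it swaps the axes constant feeding input 1 via `replace_source_output`. The concrete op (`v1::ReduceSum` here) and the shapes are illustrative assumptions, not taken from this commit:

```C++
auto data = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 3, 4});
auto axes = op::Constant::create(element::Type_t::i64, Shape{1}, {0});
auto sum = std::make_shared<op::v1::ReduceSum>(data, axes, false);
// Retarget the reduction after construction; this routes through the
// replace_source_output() call shown above and installs a fresh i64 Constant.
sum->set_reduction_axes(AxisSet{1, 2});
sum->validate_and_infer_types(); // output shape becomes {2}
```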
@ -44,7 +44,7 @@ void op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_arith
PartialShape& args_pshape = std::get<1>(args_et_pshape);

NODE_VALIDATION_CHECK(this,
args_et.is_dynamic() || args_et != element::boolean,
args_et.is_dynamic() || args_et != element::Type_t::boolean,
"Arguments cannot have boolean element type (argument element type: ",
args_et,
").");

@ -39,7 +39,7 @@ void op::util::BinaryElementwiseComparison::validate_and_infer_types()
auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob);
PartialShape& args_pshape = std::get<1>(args_et_pshape);

set_output_type(0, element::boolean, args_pshape);
set_output_type(0, element::Type_t::boolean, args_pshape);
}

bool op::util::BinaryElementwiseComparison::visit_attributes(AttributeVisitor& visitor)

@ -44,12 +44,12 @@ void op::util::BinaryElementwiseLogical::validate_and_infer_elementwise_logical(

NODE_VALIDATION_CHECK(
this,
args_et.is_dynamic() || args_et == element::boolean,
args_et.is_dynamic() || args_et == element::Type_t::boolean,
"Operands for logical operators must have boolean element type but have element type ",
args_et,
".");

set_output_type(0, element::boolean, args_pshape);
set_output_type(0, element::Type_t::boolean, args_pshape);
}

void op::util::BinaryElementwiseLogical::validate_and_infer_types()

@ -52,13 +52,13 @@ op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output<Node>& e
void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types()
{
NODE_VALIDATION_CHECK(this,
get_input_element_type(OFFSETS) == element::i64 ||
get_input_element_type(OFFSETS) == element::i32,
get_input_element_type(OFFSETS) == element::Type_t::i64 ||
get_input_element_type(OFFSETS) == element::Type_t::i32,
"OFFSETS type must be i32 or i64");

NODE_VALIDATION_CHECK(this,
get_input_element_type(INDICES) == element::i64 ||
get_input_element_type(INDICES) == element::i32,
get_input_element_type(INDICES) == element::Type_t::i64 ||
get_input_element_type(INDICES) == element::Type_t::i32,
"INDICES type must be i32 or i64");

NODE_VALIDATION_CHECK(

@ -83,8 +83,8 @@ void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types()
if (get_input_size() >= 4)
{
NODE_VALIDATION_CHECK(this,
get_input_element_type(DEFAULT_INDEX) == element::i64 ||
get_input_element_type(DEFAULT_INDEX) == element::i32,
get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 ||
get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32,
"DEFAULT_INDEX type must be i32 or i64");

NODE_VALIDATION_CHECK(

@ -40,8 +40,8 @@ op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output<Node>& emb
void op::util::EmbeddingBagPackedBase::validate_and_infer_types()
{
NODE_VALIDATION_CHECK(this,
get_input_element_type(INDICES) == element::i64 ||
get_input_element_type(INDICES) == element::i32,
get_input_element_type(INDICES) == element::Type_t::i64 ||
get_input_element_type(INDICES) == element::Type_t::i32,
"INDICES type must be i32 or i64");

NODE_VALIDATION_CHECK(this,

@ -68,8 +68,8 @@ void op::util::IndexReduction::validate_and_infer_types()
rank,
").");
NODE_VALIDATION_CHECK(this,
m_index_element_type == element::i32 ||
m_index_element_type == element::i64,
m_index_element_type == element::Type_t::i32 ||
m_index_element_type == element::Type_t::i64,
"Index element is neither i64 or i32.");

PartialShape output_shape{PartialShape::dynamic()};

@ -28,7 +28,7 @@ op::util::LogicalReduction::LogicalReduction()
op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg, const AxisSet& reduction_axes)
: Op({arg,
op::Constant::create(
element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
->output(0)})
{
add_provenance_group_member(input_value(1).get_node_shared_ptr());

@ -57,9 +57,10 @@ const AxisSet op::util::LogicalReduction::get_reduction_axes() const

void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axes)
{
this->input(1).replace_source_output(
op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
->output(0));
this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64,
Shape{reduction_axes.size()},
reduction_axes.to_vector())
->output(0));
}

void op::util::LogicalReduction::validate_and_infer_types()

@ -111,8 +112,8 @@ void op::util::LogicalReduction::validate_and_infer_types()
set_input_is_relevant_to_shape(1);

NODE_VALIDATION_CHECK(this,
get_input_element_type(0).compatible(element::boolean),
get_input_element_type(0).compatible(element::Type_t::boolean),
"Input element type must be boolean.");

set_output_type(0, element::boolean, result_shape);
set_output_type(0, element::Type_t::boolean, result_shape);
}

@ -46,7 +46,7 @@ std::shared_ptr<Node> ngraph::op::util::convert_lstm_node_format(const Output<No
const auto& to = gate_order_map.at(to_format);
size_t num_gates = 4;

auto axis_const = std::make_shared<opset4::Constant>(element::i64, Shape{}, axis);
auto axis_const = std::make_shared<opset4::Constant>(element::Type_t::i64, Shape{}, axis);
OutputVector splitted_node =
std::make_shared<opset4::Split>(node, axis_const, num_gates)->outputs();
OutputVector nodes_in_new_format(num_gates);

@ -50,7 +50,7 @@ void op::util::ScatterNDBase::validate_and_infer_types()
const PartialShape& updates_shape = get_input_partial_shape(UPDATES);

NODE_VALIDATION_CHECK(this,
indices_et == element::i32 || indices_et == element::i64,
indices_et == element::Type_t::i32 || indices_et == element::Type_t::i64,
"Indices element type must be i64 or i32");

NODE_VALIDATION_CHECK(

@ -36,7 +36,7 @@ void op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithm
PartialShape& args_pshape = std::get<1>(args_et_pshape);

NODE_VALIDATION_CHECK(this,
args_et.is_dynamic() || args_et != element::boolean,
args_et.is_dynamic() || args_et != element::Type_t::boolean,
"Arguments cannot have boolean element type (argument element type: ",
args_et,
").");

@ -25,8 +25,8 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertFP32ToFP16, "ConvertFP32ToFP16", 0);

void pass::ConvertFP32ToFP16::convert_constants_precision()
{
auto constant =
std::make_shared<ngraph::op::Constant>(element::f32, Shape{1}, std::vector<float>{0});
auto constant = std::make_shared<ngraph::op::Constant>(
element::Type_t::f32, Shape{1}, std::vector<float>{0});

ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(m.get_match_root());

@ -35,7 +35,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()
return false;
}

if (constant->get_element_type() == element::f32)
if (constant->get_element_type() == element::Type_t::f32)
{
auto data = constant->get_vector<float>();
std::vector<ngraph::float16> new_data(data.size());

@ -44,7 +44,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()
new_data[i] = ngraph::float16(data[i]);
}
auto new_const = std::make_shared<ngraph::op::Constant>(
element::f16, constant->get_shape(), new_data);
element::Type_t::f16, constant->get_shape(), new_data);
new_const->set_friendly_name(constant->get_friendly_name());
ngraph::replace_node(constant, new_const);
return true;

@ -60,13 +60,13 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()

void pass::ConvertFP32ToFP16::convert_parameters_precision()
{
auto constant = std::make_shared<ngraph::op::Parameter>(element::f32, Shape{1});
auto constant = std::make_shared<ngraph::op::Parameter>(element::Type_t::f32, Shape{1});

ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(m.get_match_root());
if (parameter && parameter->get_element_type() == element::f32)
if (parameter && parameter->get_element_type() == element::Type_t::f32)
{
parameter->set_element_type(element::f16);
parameter->set_element_type(element::Type_t::f16);
return true;
}
return false;
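A usage sketch for the pass whose hunks appear above, assuming the usual `ngraph::pass::Manager` driver (the function body here is illustrative):

```C++
auto param = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 3});
auto relu = std::make_shared<op::v0::Relu>(param);
auto f = std::make_shared<Function>(NodeVector{relu}, ParameterVector{param});

pass::Manager manager;
manager.register_pass<pass::ConvertFP32ToFP16>();
manager.run_passes(f);
// Parameters and constants matched by the callbacks above are now f16.
```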
@ -68,5 +68,6 @@ std::shared_ptr<Node> pattern::any_input()

std::shared_ptr<Node> pattern::any_input(const pattern::op::ValuePredicate& pred)
{
return std::make_shared<pattern::op::Label>(element::dynamic, PartialShape::dynamic(), pred);
return std::make_shared<pattern::op::Label>(
element::Type_t::dynamic, PartialShape::dynamic(), pred);
}

@ -62,7 +62,7 @@ runtime::HostTensor::HostTensor(const element::Type& element_type,
}

runtime::HostTensor::HostTensor(const std::string& name)
: HostTensor(element::dynamic, PartialShape::dynamic())
: HostTensor(element::Type_t::dynamic, PartialShape::dynamic())
{
}

@ -26,6 +26,7 @@
using namespace ngraph;
using namespace std;

NGRAPH_SUPPRESS_DEPRECATED_START
const element::Type element::undefined(element::Type_t::undefined);
const element::Type element::dynamic(element::Type_t::dynamic);
const element::Type element::boolean(element::Type_t::boolean);

@ -42,6 +43,7 @@ const element::Type element::u8(element::Type_t::u8);
const element::Type element::u16(element::Type_t::u16);
const element::Type element::u32(element::Type_t::u32);
const element::Type element::u64(element::Type_t::u64);
NGRAPH_SUPPRESS_DEPRECATED_END

constexpr DiscreteTypeInfo AttributeAdapter<element::Type>::type_info;

@ -102,26 +104,6 @@ static const element_types_map_t& get_type_info_map()
return s_type_info_map;
};

std::vector<const element::Type*> element::Type::get_known_types()
{
std::vector<const element::Type*> rc = {&element::dynamic,
&element::boolean,
&element::bf16,
&element::f16,
&element::f32,
&element::f64,
&element::i8,
&element::i16,
&element::i32,
&element::i64,
&element::u1,
&element::u8,
&element::u16,
&element::u32,
&element::u64};
return rc;
}

element::Type::Type(size_t bitwidth,
bool is_real,
bool is_signed,

@ -145,6 +127,11 @@ const std::string& element::Type::c_type_string() const
return get_type_info_map().at(m_type).m_cname;
}

bool element::Type::operator==(const element::Type_t& other) const
{
return m_type == other;
}

bool element::Type::operator==(const element::Type& other) const
{
return m_type == other.m_type;

@ -292,7 +279,7 @@ bool element::Type::is_real() const

bool element::Type::is_integral_number() const
{
return is_integral() && (m_type != element::boolean);
return is_integral() && (m_type != element::Type_t::boolean);
}

bool element::Type::is_signed() const
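The element_type.cpp hunks above keep the global constants alive behind `NGRAPH_SUPPRESS_DEPRECATED` and add an `operator==` taking `element::Type_t` directly. That operator is what lets the mechanical `element::f32` to `element::Type_t::f32` rewrite throughout this commit compile without extra casts. A small migration sketch:

```C++
#include <cassert>

void migration_sketch()
{
    element::Type t_old = element::f32;         // deprecated global, still defined above
    element::Type t_new = element::Type_t::f32; // preferred enum spelling
    assert(t_old == t_new);                     // Type == Type
    assert(t_new == element::Type_t::f32);      // Type == Type_t via the new operator==
    assert(t_new.is_real() && !t_new.is_integral_number());
}
```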
@ -481,7 +481,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
vector<float> float_vec;
element::Type element_type = tv->get_element_type();

if (element_type == element::boolean)
if (element_type == element::Type_t::boolean)
{
vector<char> vec = read_vector<char>(tv);
// Changed from vector ctor to explicit for loop to add static_cast

@ -491,12 +491,12 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::bf16)
else if (element_type == element::Type_t::bf16)
{
vector<bfloat16> vec = read_vector<bfloat16>(tv);
float_vec = bfloat16::to_float_vector(vec);
}
else if (element_type == element::f16)
else if (element_type == element::Type_t::f16)
{
vector<float16> vec = read_vector<float16>(tv);
for (float16 value : vec)

@ -504,7 +504,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::f32)
else if (element_type == element::Type_t::f32)
{
vector<float> vec = read_vector<float>(tv);
for (float value : vec)

@ -512,7 +512,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::f64)
else if (element_type == element::Type_t::f64)
{
vector<double> vec = read_vector<double>(tv);
for (double value : vec)

@ -520,7 +520,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::i8)
else if (element_type == element::Type_t::i8)
{
vector<int8_t> vec = read_vector<int8_t>(tv);
for (int8_t value : vec)

@ -528,7 +528,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::i16)
else if (element_type == element::Type_t::i16)
{
vector<int16_t> vec = read_vector<int16_t>(tv);
for (int16_t value : vec)

@ -536,7 +536,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::i32)
else if (element_type == element::Type_t::i32)
{
vector<int32_t> vec = read_vector<int32_t>(tv);
for (int32_t value : vec)

@ -544,7 +544,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::i64)
else if (element_type == element::Type_t::i64)
{
vector<int64_t> vec = read_vector<int64_t>(tv);
for (int64_t value : vec)

@ -552,7 +552,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::u8)
else if (element_type == element::Type_t::u8)
{
vector<uint8_t> vec = read_vector<uint8_t>(tv);
for (uint8_t value : vec)

@ -560,7 +560,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::u16)
else if (element_type == element::Type_t::u16)
{
vector<uint16_t> vec = read_vector<uint16_t>(tv);
for (uint16_t value : vec)

@ -568,7 +568,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::u32)
else if (element_type == element::Type_t::u32)
{
vector<uint32_t> vec = read_vector<uint32_t>(tv);
for (uint32_t value : vec)

@ -576,7 +576,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
float_vec.push_back(static_cast<float>(value));
}
}
else if (element_type == element::u64)
else if (element_type == element::Type_t::u64)
{
vector<uint64_t> vec = read_vector<uint64_t>(tv);
for (uint64_t value : vec)

@ -597,7 +597,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
vector<int64_t> index_vec;
element::Type element_type = tv->get_element_type();

if (element_type == element::boolean)
if (element_type == element::Type_t::boolean)
{
vector<char> vec = read_vector<char>(tv);
// Changed from vector ctor to explicit for loop to add static_cast

@ -607,7 +607,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::bf16)
else if (element_type == element::Type_t::bf16)
{
vector<bfloat16> vec = read_vector<bfloat16>(tv);
vector<float> float_vec = bfloat16::to_float_vector(vec);

@ -616,7 +616,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::f16)
else if (element_type == element::Type_t::f16)
{
vector<float16> vec = read_vector<float16>(tv);
for (float16 value : vec)

@ -624,7 +624,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(static_cast<float>(value)));
}
}
else if (element_type == element::f32)
else if (element_type == element::Type_t::f32)
{
vector<float> vec = read_vector<float>(tv);
for (float value : vec)

@ -632,7 +632,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::f64)
else if (element_type == element::Type_t::f64)
{
vector<double> vec = read_vector<double>(tv);
for (double value : vec)

@ -640,7 +640,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::i8)
else if (element_type == element::Type_t::i8)
{
vector<int8_t> vec = read_vector<int8_t>(tv);
for (int8_t value : vec)

@ -648,7 +648,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::i16)
else if (element_type == element::Type_t::i16)
{
vector<int16_t> vec = read_vector<int16_t>(tv);
for (int16_t value : vec)

@ -656,7 +656,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::i32)
else if (element_type == element::Type_t::i32)
{
vector<int32_t> vec = read_vector<int32_t>(tv);
for (int32_t value : vec)

@ -664,11 +664,11 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::i64)
else if (element_type == element::Type_t::i64)
{
index_vec = read_vector<int64_t>(tv);
}
else if (element_type == element::u8)
else if (element_type == element::Type_t::u8)
{
vector<uint8_t> vec = read_vector<uint8_t>(tv);
for (uint8_t value : vec)

@ -676,7 +676,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::u16)
else if (element_type == element::Type_t::u16)
{
vector<uint16_t> vec = read_vector<uint16_t>(tv);
for (uint16_t value : vec)

@ -684,7 +684,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::u32)
else if (element_type == element::Type_t::u32)
{
vector<uint32_t> vec = read_vector<uint32_t>(tv);
for (uint32_t value : vec)

@ -692,7 +692,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
index_vec.push_back(static_cast<int64_t>(value));
}
}
else if (element_type == element::u64)
else if (element_type == element::Type_t::u64)
{
vector<uint64_t> vec = read_vector<uint64_t>(tv);
for (uint64_t value : vec)
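A usage sketch for the test helpers above; the backend name and the `write` call follow the test utilities elsewhere in the tree and are assumptions here:

```C++
auto backend = runtime::Backend::create("INTERPRETER");
auto tensor = backend->create_tensor(element::Type_t::i32, Shape{4});
vector<int32_t> values{1, 2, 3, 4};
tensor->write(values.data(), values.size() * sizeof(int32_t));
// Dispatches on the element-type chain above and widens each element to float.
vector<float> as_float = read_float_vector(tensor);
```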
@ -531,7 +531,7 @@ namespace ngraph
return static_cast<Type>(m_tensor_proto->data_type());
}

const element::Type& get_ng_type() const
element::Type get_ng_type() const
{
if (!m_tensor_proto->has_data_type())
{

@ -540,29 +540,29 @@ namespace ngraph
switch (m_tensor_proto->data_type())
{
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL:
return element::boolean;
return element::Type_t::boolean;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT:
return element::f32;
return element::Type_t::f32;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16:
return element::f16;
return element::Type_t::f16;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE:
return element::f64;
return element::Type_t::f64;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8:
return element::i8;
return element::Type_t::i8;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16:
return element::i16;
return element::Type_t::i16;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32:
return element::i32;
return element::Type_t::i32;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64:
return element::i64;
return element::Type_t::i64;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8:
return element::u8;
return element::Type_t::u8;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16:
return element::u16;
return element::Type_t::u16;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32:
return element::u32;
return element::Type_t::u32;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64:
return element::u64;
return element::Type_t::u64;
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED:
throw error::tensor::data_type_undefined{};
default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()};

@ -575,29 +575,29 @@ namespace ngraph
switch (m_tensor_proto->data_type())
{
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL:
return make_ng_constant<char>(element::boolean);
return make_ng_constant<char>(element::Type_t::boolean);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT:
return make_ng_constant<float>(element::f32);
return make_ng_constant<float>(element::Type_t::f32);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16:
return make_ng_constant<ngraph::float16>(element::f16);
return make_ng_constant<ngraph::float16>(element::Type_t::f16);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE:
return make_ng_constant<double>(element::f64);
return make_ng_constant<double>(element::Type_t::f64);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8:
return make_ng_constant<int8_t>(element::i8);
return make_ng_constant<int8_t>(element::Type_t::i8);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16:
return make_ng_constant<int16_t>(element::i16);
return make_ng_constant<int16_t>(element::Type_t::i16);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32:
return make_ng_constant<int32_t>(element::i32);
return make_ng_constant<int32_t>(element::Type_t::i32);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64:
return make_ng_constant<int64_t>(element::i64);
return make_ng_constant<int64_t>(element::Type_t::i64);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8:
return make_ng_constant<uint8_t>(element::u8);
return make_ng_constant<uint8_t>(element::Type_t::u8);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16:
return make_ng_constant<uint16_t>(element::u16);
return make_ng_constant<uint16_t>(element::Type_t::u16);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32:
return make_ng_constant<uint32_t>(element::u32);
return make_ng_constant<uint32_t>(element::Type_t::u32);
case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64:
return make_ng_constant<uint64_t>(element::u64);
return make_ng_constant<uint64_t>(element::Type_t::u64);
default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()};
}
}
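Note why `get_ng_type` above (and `get_element_type` in the next hunk) drop the `const element::Type&` return: each `return element::Type_t::...;` now materializes a temporary `element::Type`, and returning a reference to it would dangle. A sketch of the distinction:

```C++
element::Type ok() { return element::Type_t::f32; } // by value: the temporary is returned

// const element::Type& bad() { return element::Type_t::f32; }
// ^ would bind the returned reference to a temporary element::Type
//   constructed from the enum, destroyed when bad() returns
```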
@ -75,7 +75,7 @@ namespace ngraph

const std::string& get_name() const { return m_value_info_proto->name(); }
const PartialShape& get_shape() const { return m_partial_shape; }
const element::Type& get_element_type() const
element::Type get_element_type() const
{
if (!m_value_info_proto->type().tensor_type().has_elem_type())
{

@ -43,7 +43,8 @@ namespace ngraph
return {std::make_shared<default_opset::Gather>(
data,
indices,
default_opset::Constant::create(element::i64, Shape{}, {valid_axis}))};
default_opset::Constant::create(
element::Type_t::i64, Shape{}, {valid_axis}))};
}

} // namespace set_1

@ -33,10 +33,10 @@ namespace ngraph
inline OutputVector identity(const Node& node)
{
auto input = node.get_ng_inputs().at(0);
if (input.get_element_type() == ngraph::element::boolean)
if (input.get_element_type() == ngraph::element::Type_t::boolean)
{
const auto logic_zero =
default_opset::Constant::create(ngraph::element::boolean, {}, {false});
const auto logic_zero = default_opset::Constant::create(
ngraph::element::Type_t::boolean, {}, {false});
return {std::make_shared<default_opset::LogicalOr>(input, logic_zero)};
}
const auto zero =

@ -37,7 +37,7 @@ namespace ngraph
{
namespace common
{
const ngraph::element::Type& get_ngraph_element_type(std::int64_t onnx_type);
const ngraph::element::Type get_ngraph_element_type(std::int64_t onnx_type);

/// \brief Return a monotonic sequence.
///

@ -62,84 +62,84 @@ namespace ngraph
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::float16>(const Tensor& tensor)
{
return __make_ng_constant<ngraph::float16>(element::f16, tensor);
return __make_ng_constant<ngraph::float16>(element::Type_t::f16, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::float32>(const Tensor& tensor)
{
return __make_ng_constant<float>(element::f32, tensor);
return __make_ng_constant<float>(element::Type_t::f32, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::float64>(const Tensor& tensor)
{
return __make_ng_constant<double>(element::f64, tensor);
return __make_ng_constant<double>(element::Type_t::f64, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::int8>(const Tensor& tensor)
{
return __make_ng_constant<int8_t>(element::i8, tensor);
return __make_ng_constant<int8_t>(element::Type_t::i8, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::int16>(const Tensor& tensor)
{
return __make_ng_constant<int16_t>(element::i16, tensor);
return __make_ng_constant<int16_t>(element::Type_t::i16, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::int32>(const Tensor& tensor)
{
return __make_ng_constant<int32_t>(element::i32, tensor);
return __make_ng_constant<int32_t>(element::Type_t::i32, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::int64>(const Tensor& tensor)
{
return __make_ng_constant<int64_t>(element::i64, tensor);
return __make_ng_constant<int64_t>(element::Type_t::i64, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::uint8>(const Tensor& tensor)
{
return __make_ng_constant<uint8_t>(element::u8, tensor);
return __make_ng_constant<uint8_t>(element::Type_t::u8, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::uint16>(const Tensor& tensor)
{
return __make_ng_constant<uint16_t>(element::u16, tensor);
return __make_ng_constant<uint16_t>(element::Type_t::u16, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::uint32>(const Tensor& tensor)
{
return __make_ng_constant<uint32_t>(element::u32, tensor);
return __make_ng_constant<uint32_t>(element::Type_t::u32, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::uint64>(const Tensor& tensor)
{
return __make_ng_constant<uint64_t>(element::u64, tensor);
return __make_ng_constant<uint64_t>(element::Type_t::u64, tensor);
}

template <>
inline std::shared_ptr<default_opset::Constant>
make_ng_constant<Tensor::Type::boolean>(const Tensor& tensor)
{
return __make_ng_constant<char>(element::boolean, tensor);
return __make_ng_constant<char>(element::Type_t::boolean, tensor);
}

inline std::shared_ptr<default_opset::Constant>
@ -39,7 +39,8 @@ namespace ngraph
}
else
{
constant_value = default_opset::Constant::create(element::f32, {}, {0});
constant_value =
default_opset::Constant::create(element::Type_t::f32, {}, {0});
}
return {std::make_shared<default_opset::Broadcast>(constant_value,
node.get_ng_inputs().at(0))};

@ -63,10 +63,11 @@ namespace ngraph
padding_above);

const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1);
auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1);
auto scale_one = make_constant(ngraph::element::Type_t::f32, Shape{}, 1);
auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0);
auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0);
auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0);
auto output_zero_point =
make_constant(ngraph::element::Type_t::i32, Shape{}, 0);

if (num_inputs == 2)
{

@ -84,7 +85,7 @@ namespace ngraph
filters_zero_point,
scale_one,
output_zero_point,
ngraph::element::i32,
ngraph::element::Type_t::i32,
ngraph::AxisSet{},
ngraph::AxisSet{},
ngraph::AxisSet{})};

@ -110,7 +111,7 @@ namespace ngraph
filters_zero_point,
scale_one,
output_zero_point,
ngraph::element::i32,
ngraph::element::Type_t::i32,
ngraph::AxisSet{},
ngraph::AxisSet{},
ngraph::AxisSet{})};

@ -74,7 +74,7 @@ namespace ngraph
data,
filters,
default_opset::Constant::create(
element::i64, Shape{output_shape.size()}, output_shape),
element::Type_t::i64, Shape{output_shape.size()}, output_shape),
strides,
dilations,
auto_pad_type,

@ -113,7 +113,7 @@ namespace ngraph
data,
filters,
default_opset::Constant::create(
element::i64, Shape{output_shape.size()}, output_shape),
element::Type_t::i64, Shape{output_shape.size()}, output_shape),
strides,
pads_begin,
pads_end,

@ -144,10 +144,10 @@ namespace ngraph
std::make_shared<default_opset::ShapeOf>(filters);
const auto filters_rank =
std::make_shared<default_opset::ShapeOf>(filters_shape);
const auto one_node =
default_opset::Constant::create(element::i64, Shape{1}, {1});
const auto zero_node =
default_opset::Constant::create(element::i64, Shape{1}, {0});
const auto one_node = default_opset::Constant::create(
element::Type_t::i64, Shape{1}, {1});
const auto zero_node = default_opset::Constant::create(
element::Type_t::i64, Shape{1}, {0});

std::shared_ptr<ngraph::Node> in_c_dim =
std::make_shared<default_opset::StridedSlice>(

@ -166,8 +166,8 @@ namespace ngraph
std::vector<int64_t>{0}); // end mask

// Apply shape layout transformation:
const auto groups_node =
default_opset::Constant::create(element::i64, Shape{1}, {groups});
const auto groups_node = default_opset::Constant::create(
element::Type_t::i64, Shape{1}, {groups});
in_c_dim =
std::make_shared<default_opset::Divide>(in_c_dim, groups_node);

@ -192,7 +192,7 @@ namespace ngraph
new_bias_shape[1] = conv_pshape[1].get_length();

bias_shape_node = default_opset::Constant::create(
element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
element::Type_t::i64, Shape{new_bias_shape.size()}, new_bias_shape);
}
else
{

@ -201,10 +201,10 @@ namespace ngraph
std::make_shared<default_opset::ShapeOf>(conv_shape);

// Prepare new bias shape base: [1, 1, 1, 1, ... ]
const auto one_node =
default_opset::Constant::create(element::i64, Shape{1}, {1});
const auto two_node =
default_opset::Constant::create(element::i64, Shape{1}, {2});
const auto one_node = default_opset::Constant::create(
element::Type_t::i64, Shape{1}, {1});
const auto two_node = default_opset::Constant::create(
element::Type_t::i64, Shape{1}, {2});
const auto remaining_shape_length =
std::make_shared<default_opset::Subtract>(conv_rank, two_node);
const auto remaining_bias_shape_ones =
@ -41,8 +41,8 @@ namespace ngraph
}
else
{
axis =
default_opset::Constant::create(element::i64, Shape{}, {0}); // default
axis = default_opset::Constant::create(
element::Type_t::i64, Shape{}, {0}); // default
}
return OutputVector{
std::make_shared<default_opset::CumSum>(data, axis, exclusive, reverse)};

@ -41,17 +41,17 @@ namespace ngraph
{
auto zero_point = inputs[2];

if (zero_point.get_element_type() != element::f32)
if (zero_point.get_element_type() != element::Type_t::f32)
{
zero_point =
std::make_shared<default_opset::Convert>(zero_point, element::f32);
zero_point = std::make_shared<default_opset::Convert>(
zero_point, element::Type_t::f32);
}

return zero_point;
}
else
{
return default_opset::Constant::create(element::f32, Shape{}, {0});
return default_opset::Constant::create(element::Type_t::f32, Shape{}, {0});
}
}
}

@ -70,12 +70,13 @@ namespace ngraph
const auto scale = inputs[1];
const auto zero_point = get_zero_point(inputs);

common::validate_scalar_input(
"Dequantization scale", scale.get_node_shared_ptr(), {element::f32});
common::validate_scalar_input("Dequantization scale",
scale.get_node_shared_ptr(),
{element::Type_t::f32});
common::validate_scalar_input("Zero point", zero_point.get_node_shared_ptr());

const auto converted_x =
std::make_shared<default_opset::Convert>(x, element::f32);
std::make_shared<default_opset::Convert>(x, element::Type_t::f32);

return {std::make_shared<default_opset::Multiply>(
std::make_shared<default_opset::Subtract>(converted_x, zero_point), scale)};

@ -163,7 +164,7 @@ namespace ngraph
}

const auto target_shape = default_opset::Constant::create(
element::i64, Shape{target_dims.size()}, target_dims);
element::Type_t::i64, Shape{target_dims.size()}, target_dims);

return std::make_shared<default_opset::Reshape>(input, target_shape, true);
}

@ -198,7 +199,7 @@ namespace ngraph
zero_point = reshape_input(zero_point, axis, x_shape);

const auto converted_x =
std::make_shared<default_opset::Convert>(x, element::f32);
std::make_shared<default_opset::Convert>(x, element::Type_t::f32);

return {std::make_shared<default_opset::Multiply>(
std::make_shared<default_opset::Subtract>(converted_x, zero_point), scale)};
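The dequantize_linear hunks above assemble y = (convert<f32>(x) - zero_point) * scale. A scalar sketch of that arithmetic with made-up values:

```C++
#include <cstdint>
#include <cstdio>

int main()
{
    const std::uint8_t x = 200;   // quantized input element
    const float zero_point = 128; // already converted to f32, as in get_zero_point()
    const float scale = 0.05f;
    const float y = (static_cast<float>(x) - zero_point) * scale;
    std::printf("dequantized: %.2f\n", y); // 3.60
    return 0;
}
```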
@ -57,7 +57,7 @@ namespace ngraph
|
||||
auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
|
||||
std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
|
||||
auto reduce_axes = default_opset::Constant::create(
|
||||
element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
|
||||
element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector);
|
||||
|
||||
return {std::make_shared<default_opset::ReduceMean>(data, reduce_axes, true)};
|
||||
}
|
||||
|
@ -57,7 +57,7 @@ namespace ngraph
|
||||
auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
|
||||
std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
|
||||
auto reduce_axes = default_opset::Constant::create(
|
||||
element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
|
||||
element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector);
|
||||
|
||||
return {std::make_shared<default_opset::ReduceMax>(data, reduce_axes, true)};
|
||||
}
|
||||
|
@ -50,22 +50,22 @@ namespace ngraph
|
||||
std::make_shared<default_opset::ShapeOf>(coerced_tensor);
|
||||
Output<ngraph::Node> row_size = std::make_shared<default_opset::Gather>(
|
||||
coerced_tensor_shape,
|
||||
default_opset::Constant::create(element::i64, {1}, {1}),
|
||||
default_opset::Constant::create(element::i64, {}, {0}));
|
||||
default_opset::Constant::create(element::Type_t::i64, {1}, {1}),
|
||||
default_opset::Constant::create(element::Type_t::i64, {}, {0}));
|
||||
row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size);
|
||||
|
||||
const auto indices_axis = 1;
|
||||
const auto topk = std::make_shared<default_opset::TopK>(
|
||||
coerced_tensor,
|
||||
default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}),
|
||||
default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}),
|
||||
indices_axis,
|
||||
default_opset::TopK::Mode::MAX,
|
||||
default_opset::TopK::SortType::NONE);
|
||||
|
||||
const auto on_value =
|
||||
default_opset::Constant::create(ngraph::element::i64, Shape{}, {1});
|
||||
default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1});
|
||||
const auto off_value =
|
||||
default_opset::Constant::create(ngraph::element::i64, Shape{}, {0});
|
||||
default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {0});
|
||||
|
||||
const auto results = std::make_shared<default_opset::OneHot>(
|
||||
topk->output(1), row_size, on_value, off_value, indices_axis);
|
||||
|
@ -99,7 +99,7 @@ namespace ngraph
|
||||
if (data_pshape.is_static())
|
||||
{
|
||||
data_shape_node = std::make_shared<default_opset::Constant>(
|
||||
element::i64,
|
||||
element::Type_t::i64,
|
||||
Shape{static_cast<size_t>(data_pshape.rank().get_length())},
|
||||
data_pshape.to_shape());
|
||||
}
|
||||
@ -112,11 +112,13 @@ namespace ngraph
|
||||
scale = std::make_shared<default_opset::Broadcast>(
|
||||
scale,
|
||||
data_shape_node,
|
||||
std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1));
|
||||
std::make_shared<default_opset::Constant>(
|
||||
element::Type_t::i64, Shape{1}, 1));
|
||||
bias = std::make_shared<default_opset::Broadcast>(
|
||||
bias,
|
||||
data_shape_node,
|
||||
std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1));
|
||||
std::make_shared<default_opset::Constant>(
|
||||
element::Type_t::i64, Shape{1}, 1));
|
||||
|
||||
// scale * mvn + bias
|
||||
std::shared_ptr<ngraph::Node> result =
|
||||
|
@ -32,7 +32,8 @@ namespace ngraph
|
||||
{
|
||||
const auto coerced_data = ngraph::builder::opset1::flatten(data, axis);
|
||||
|
||||
const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1});
|
||||
const auto axis_1 =
|
||||
default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1});
|
||||
const auto max =
|
||||
std::make_shared<default_opset::ReduceMax>(coerced_data, axis_1, true);
|
||||
|
||||
|
@ -62,7 +62,7 @@ namespace ngraph
->input_value(1)
.get_node_shared_ptr();
if (ngraph::op::is_constant(second_input) &&
second_input->get_element_type() == element::boolean &&
second_input->get_element_type() == element::Type_t::boolean &&
as_type_ptr<default_opset::Constant>(second_input)
->cast_vector<bool>()
.at(0) == false)
|
@ -90,7 +90,8 @@ namespace ngraph
if (ngraph::op::is_null(ng_inputs.at(0))) // trip count skipped
{
// -1 means infinite Loop
trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1});
trip_count =
ngraph::op::Constant::create(ngraph::element::Type_t::i64, {1}, {-1});
}
else
{
|
@ -102,8 +103,8 @@ namespace ngraph
if (ngraph::op::is_null(
ng_inputs.at(1).get_node_shared_ptr())) // termination condition skipped
{
termination_cond =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
termination_cond = ngraph::op::Constant::create(
ngraph::element::Type_t::boolean, {1}, {true});
}
else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
as_type_ptr<default_opset::Constant>(
|
@ -130,8 +131,8 @@ namespace ngraph
}

const int64_t concat_axis = 0;
const auto concat_axis_const =
ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis});
const auto concat_axis_const = ngraph::op::Constant::create(
ngraph::element::Type_t::i64, {1}, {concat_axis});
// provide scalar handling for scan outputs
for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size();
++i)
|
@ -149,8 +150,8 @@ namespace ngraph
// this optimization allows improving nG Loop shape inference
if (is_termination_condition_always_true(body_loop_out_cond))
{
body_outputs[0] =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
body_outputs[0] = ngraph::op::Constant::create(
ngraph::element::Type_t::boolean, {1}, {true});
}

CHECK_VALID_NODE(node,
|
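The Loop hunks above all build the same fallback constants for optional ONNX inputs: a trip count of -1 when it is skipped (iterate until the body's condition breaks) and a termination condition fixed at `true`. A minimal sketch of those two defaults in isolation, using the post-commit scoped types:
```C++
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // ONNX Loop with no trip count: -1 conventionally means "iterate until
    // the body's condition output becomes false".
    auto trip_count =
        op::Constant::create(element::Type_t::i64, Shape{1}, {-1});

    // ONNX Loop with no condition input: run as if the condition were
    // permanently true.
    auto termination_cond =
        op::Constant::create(element::Type_t::boolean, Shape{1}, {true});
    return 0;
}
```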
@ -58,12 +58,14 @@ namespace ngraph
"Only normalization of 1st or 2nd order is supported.");

const auto normalize_axis_const =
default_opset::Constant::create(element::i64, {}, {normalize_axis});
default_opset::Constant::create(element::Type_t::i64, {}, {normalize_axis});
std::shared_ptr<ngraph::Node> norm = ngraph::builder::opset1::lp_norm(
data, normalize_axis_const, static_cast<std::size_t>(p_norm));

const auto target_shape = default_opset::Constant::create(
element::i64, Shape{size_t(data_rank_value)}, data_shape.to_shape());
const auto target_shape =
default_opset::Constant::create(element::Type_t::i64,
Shape{size_t(data_rank_value)},
data_shape.to_shape());

// Create a default axes order matching the data tensor rank and erase the
// element at the 'normalize_axis' position. The erased element indicates the
|
@ -74,7 +76,7 @@ namespace ngraph
axes_values.erase(axes_values.begin() + normalize_axis);

const auto axes_mapping = default_opset::Constant::create(
element::i64, Shape{axes_values.size()}, axes_values);
element::Type_t::i64, Shape{axes_values.size()}, axes_values);

norm = std::make_shared<default_opset::Broadcast>(
norm, target_shape, axes_mapping);
|
@ -75,7 +75,7 @@ namespace ngraph
output_shape.at(0) = data_shape[0].get_length();

const auto reshape_pattern = default_opset::Constant::create(
element::i64, Shape{output_shape.size()}, output_shape);
element::Type_t::i64, Shape{output_shape.size()}, output_shape);

slice =
std::make_shared<default_opset::Reshape>(slice, reshape_pattern, false);
|
@ -211,7 +211,7 @@ namespace ngraph

m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] =
default_opset::Constant::create(
element::i32,
element::Type_t::i32,
Shape{m_dim_map[LSTMInputDimension::BATCH_SIZE]},
std::vector<std::int32_t>(
m_dim_map[LSTMInputDimension::BATCH_SIZE],
|
@ -49,7 +49,7 @@ namespace ngraph
else
{
max_output_boxes_per_class =
default_opset::Constant::create(element::i64, Shape{}, {0});
default_opset::Constant::create(element::Type_t::i64, Shape{}, {0});
}

Output<ngraph::Node> iou_threshold;
|
@ -61,7 +61,7 @@ namespace ngraph
else
{
iou_threshold =
default_opset::Constant::create(element::f32, Shape{}, {.0f});
default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f});
}

Output<ngraph::Node> score_threshold;
|
@ -73,7 +73,7 @@ namespace ngraph
else
{
score_threshold =
default_opset::Constant::create(element::f32, Shape{}, {.0f});
default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f});
}

const auto center_point_box =
|
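The three hunks above supply defaults for optional NonMaxSuppression inputs, each as a rank-0 constant: zero boxes per class and 0.0 for both thresholds. A sketch of just those defaults, assuming nothing beyond the post-commit scoped element types:
```C++
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // Defaults used when the optional NonMaxSuppression inputs are absent.
    auto max_output_boxes_per_class =
        op::Constant::create(element::Type_t::i64, Shape{}, {0});
    auto iou_threshold =
        op::Constant::create(element::Type_t::f32, Shape{}, {0.0f});
    auto score_threshold =
        op::Constant::create(element::Type_t::f32, Shape{}, {0.0f});
    return 0;
}
```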
@ -30,7 +30,7 @@ namespace ngraph
OutputVector non_zero(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
return {std::make_shared<default_opset::NonZero>(data, element::i64)};
return {std::make_shared<default_opset::NonZero>(data, element::Type_t::i64)};
}

} // namespace set_1
|
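`NonZero` takes the index element type as a constructor argument, so this is another call site where the scoped enum flows through implicit conversion to `element::Type`. A hedged standalone sketch, assuming `ngraph::opset3` (which provides `NonZero`) and a hypothetical 2x3 input:
```C++
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

using namespace ngraph;

int main()
{
    auto data =
        std::make_shared<opset3::Parameter>(element::Type_t::f32, Shape{2, 3});

    // The second argument selects the element type of the returned indices;
    // element::Type_t::i64 converts implicitly to element::Type here.
    auto indices = std::make_shared<opset3::NonZero>(data, element::Type_t::i64);
    return 0;
}
```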
@ -32,13 +32,14 @@ namespace ngraph
OutputVector onehot(const Node& node)
{
OutputVector inputs{node.get_ng_inputs()};
auto indices =
std::make_shared<default_opset::Convert>(inputs.at(0), element::i64);
auto indices = std::make_shared<default_opset::Convert>(inputs.at(0),
element::Type_t::i64);
auto depth = reshape::interpret_as_scalar(inputs.at(1));

// Rank 1 tensor containing exactly two elements: [off_value, on_value]
auto values = inputs.at(2);
auto split_axis = default_opset::Constant::create(element::i64, {}, {0});
auto split_axis =
default_opset::Constant::create(element::Type_t::i64, {}, {0});
auto off_on_values =
std::make_shared<default_opset::Split>(values, split_axis, 2);
auto off_value = reshape::interpret_as_scalar(off_on_values->output(0));
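The OneHot importer above splits the two-element `values` input into separate off/on scalars along a constant axis. A sketch of that Split step on its own, assuming `ngraph::opset1` provides the two-output `Split` variant used here:
```C++
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

using namespace ngraph;

int main()
{
    // Two-element tensor [off_value, on_value], as the ONNX OneHot spec packs it.
    auto values =
        std::make_shared<opset1::Parameter>(element::Type_t::f32, Shape{2});

    // Split along axis 0 into two length-1 pieces.
    auto split_axis = opset1::Constant::create(element::Type_t::i64, Shape{}, {0});
    auto off_on = std::make_shared<opset1::Split>(values, split_axis, 2);

    auto off_value = off_on->output(0);
    auto on_value = off_on->output(1);
    return 0;
}
```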
Some files were not shown because too many files have changed in this diff.