Revert "Deprecate global element types (#3444)" (#3468)

* Revert "Deprecate global element types (#3444)"

This reverts commit 071fb9d1c6.

* Fixed code style
Ilya Churaev 2020-12-04 13:28:53 +03:00 committed by GitHub
parent 3d66869081
commit 256e047ad2
372 changed files with 7625 additions and 8036 deletions


@@ -103,9 +103,9 @@ methods have been decorated with deprecated warnings which may be enabled by set
 To update, remove the passed argument. For example,
 ```C++
 // Old
-make_shared<Parameter>(make_shared<descriptor::TensorViewType>(element::Type_t::f32, Shape{2, 4}));
+make_shared<Parameter>(make_shared<descriptor::TensorViewType>(element::f32, Shape{2, 4}));
 // New (remove TensorViewType)
-make_shared<Parameter>(element::Type_t::f32, Shape{2, 4});
+make_shared<Parameter>(element::f32, Shape{2, 4});
 // Old
 make_shared<Function>(results, result_type, parameters);
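As a quick orientation for the style this revert restores, here is a minimal self-contained sketch using the global element-type constants (`element::f32`) rather than the `element::Type_t` enum; the `Relu` node is only an illustrative graph body, not part of the commit:

```C++
// Sketch of the restored (non-Type_t) API style; assumes an nGraph build
// where the umbrella header <ngraph/ngraph.hpp> is available.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // Parameter is constructed directly from an element type and a shape.
    auto param = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});

    // A trivial graph body, purely for illustration: relu(param).
    auto relu = std::make_shared<op::v0::Relu>(param);

    // Function takes the result outputs and the parameter list; the old
    // result_type argument mentioned in the changelog above is gone.
    auto f = std::make_shared<Function>(OutputVector{relu}, ParameterVector{param});
    return 0;
}
```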


@@ -169,7 +169,7 @@ namespace ngraph
                             std::size_t start_match_axis)
 {
     auto shape_const =
-        op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape);
+        op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape);
     return std::make_shared<op::v1::Broadcast>(
         value,
         shape_const,


@@ -177,8 +177,8 @@ namespace ngraph
 if (!broadcast_axes.empty())
 {
-    auto shape_const = op::Constant::create(
-        element::Type_t::u64, Shape{output_shape.size()}, output_shape);
+    auto shape_const =
+        op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape);
     broadcasted_node = make_shared<op::v1::Broadcast>(
         broadcasted_node,
         shape_const,
@@ -236,8 +236,8 @@
     trimmed_value = builder::opset1::reshape(value, trimmed_value_shape);
 }
-auto shape_const = op::Constant::create(
-    element::Type_t::u64, Shape{output_shape.size()}, output_shape);
+auto shape_const =
+    op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape);
 auto value_bcast = make_shared<op::v1::Broadcast>(
     trimmed_value, shape_const, opset1::get_axes_mapping_output(output_shape, axes));
@@ -354,8 +354,7 @@
 iota(begin(axes) + start_match_axis, end(axes), start_match_axis + input_shape.size());
 auto axes_mapping = opset1::get_axes_mapping(output_shape, axes);
-return op::Constant::create(
-    element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping);
+return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping);
 }

 namespace opset1
@@ -435,15 +434,14 @@
 vector<size_t> mapping(input_shape.size());
 iota(begin(mapping), end(mapping), start_match_axis);
-return op::Constant::create(element::Type_t::i64, Shape{mapping.size()}, mapping);
+return op::Constant::create(element::i64, Shape{mapping.size()}, mapping);
 }

 Output<Node> get_axes_mapping_output(const Shape& output_shape,
                                      const AxisSet& broadcast_axes)
 {
     vector<size_t> axes_mapping{get_axes_mapping(output_shape, broadcast_axes)};
-    return op::Constant::create(
-        element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping);
+    return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping);
 }

 Output<Node> make_broadcast(const Output<Node>& node,
@@ -452,8 +450,7 @@
 {
     return make_shared<op::v1::Broadcast>(
         node,
-        op::Constant::create(
-            element::Type_t::i64, Shape{target_shape.size()}, target_shape),
+        op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape),
         get_axes_mapping_output(target_shape, broadcast_axes));
 }
@@ -463,8 +460,7 @@
 {
     return make_shared<op::v1::Broadcast>(
         node,
-        op::Constant::create(
-            element::Type_t::i64, Shape{target_shape.size()}, target_shape),
+        op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape),
         get_axes_mapping_output(target_shape, node.get_shape(), start_match_axis));
 }
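For context, a minimal usage sketch of the `builder::opset1::make_broadcast` helper whose body is reflowed above; the signature is taken from the hunks, while the header path is an assumption:

```C++
// Sketch: broadcast a 1-D value into a 2-D target shape along axis 0,
// using the signature visible in the diff above. The include path
// <ngraph/builder/autobroadcast.hpp> is an assumption about where the
// builder lives.
#include <ngraph/ngraph.hpp>
#include <ngraph/builder/autobroadcast.hpp>

using namespace ngraph;

int main()
{
    auto value = std::make_shared<op::Parameter>(element::f32, Shape{4});
    Output<Node> bcast = builder::opset1::make_broadcast(value, Shape{3, 4}, AxisSet{0});

    auto f = std::make_shared<Function>(OutputVector{bcast}, ParameterVector{value});
    return 0;
}
```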


@@ -49,10 +49,10 @@ namespace ngraph
 const auto dim_values = std::make_shared<ngraph::opset1::Gather>(
     value_shape,
     reduction_axes,
-    ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}));
+    ngraph::opset1::Constant::create(element::i64, {}, {0}));

 return std::make_shared<ngraph::opset1::ReduceProd>(
-    dim_values, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}));
+    dim_values, ngraph::opset1::Constant::create(element::i64, {}, {0}));
 }

 std::shared_ptr<Node> builder::opset1::mean(const Output<Node>& value,
@@ -62,7 +62,7 @@
 std::shared_ptr<Node> elems_number;
 const auto value_elem_type = value.get_element_type();
 const auto reduction_axes_const = ngraph::opset1::Constant::create(
-    element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector());
+    element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector());
 const auto value_elems_sum =
     std::make_shared<ngraph::opset1::ReduceSum>(value, reduction_axes_const, keep_dims);
 if (value.get_partial_shape().is_static())
@@ -109,7 +109,7 @@
 diff = std::make_shared<ngraph::opset1::ReduceSum>(
     std::make_shared<ngraph::opset1::Multiply>(diff, diff),
     ngraph::opset1::Constant::create(
-        element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
+        element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()),
     false);

 const auto& et = value.get_element_type();


@@ -47,13 +47,13 @@ shared_ptr<Node> builder::opset1::reshape(const Output<Node>& value, const Shape
 auto value_rank = value.get_shape().size();
 AxisVector axes_vector(value_rank);
 std::iota(axes_vector.begin(), axes_vector.end(), 0);
-auto axes = op::Constant::create(element::Type_t::i64, Shape{value_rank}, axes_vector);
+auto axes = op::Constant::create(element::i64, Shape{value_rank}, axes_vector);
 return std::make_shared<op::Squeeze>(value, axes);
 }
 else
 {
     auto out_pattern = op::Constant::create(
-        element::Type_t::i64, Shape{shape.size()}, vector<int64_t>(shape.begin(), shape.end()));
+        element::i64, Shape{shape.size()}, vector<int64_t>(shape.begin(), shape.end()));
     return make_shared<ngraph::opset1::Reshape>(value, out_pattern, false)
         ->add_provenance_group_members_above({value});
@@ -63,7 +63,7 @@ shared_ptr<Node> builder::opset1::reshape(const Output<Node>& value, const Shape
 shared_ptr<Node> builder::opset1::reorder_axes(const Output<Node>& value, vector<size_t> axes_order)
 {
     const auto axes_order_const =
-        op::Constant::create(element::Type_t::i64,
+        op::Constant::create(element::i64,
                              Shape{axes_order.size()},
                              vector<int64_t>(axes_order.begin(), axes_order.end()));
     return make_shared<ngraph::opset1::Transpose>(value, axes_order_const)
@@ -83,7 +83,7 @@ shared_ptr<Node> builder::opset1::transpose(const Output<Node>& value)
 const auto input_rank =
     std::make_shared<ngraph::opset1::ShapeOf>(std::make_shared<ngraph::opset1::ShapeOf>(value));
-const auto neg_one = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {-1});
+const auto neg_one = ngraph::opset1::Constant::create(element::i64, Shape{}, {-1});
 const auto start_node = std::make_shared<ngraph::opset1::Add>(input_rank, neg_one);
 const auto reverse_axes_order =
     std::make_shared<ngraph::opset1::Range>(reshape(start_node, Shape{}), // start
@@ -114,7 +114,7 @@ namespace ngraph
 get_normalized_axis_node(const std::shared_ptr<Node> node_rank, int64_t axis)
 {
     auto axis_node =
-        ngraph::opset1::Constant::create(element::Type_t::i64, Shape{1}, {axis});
+        ngraph::opset1::Constant::create(element::i64, Shape{1}, {axis});
     // shortcut for alredy positive value
     if (axis >= 0)
     {
@@ -138,11 +138,11 @@ shared_ptr<Node> builder::opset1::flatten(const Output<Node>& value, int axis)
 shared_ptr<Node> output_shape;
 if (axis == 0)
 {
-    output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {1, -1});
+    output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {1, -1});
 }
 else if (axis == 1)
 {
-    output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {0, -1});
+    output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {0, -1});
 }
 else
 {
@@ -152,15 +152,15 @@ shared_ptr<Node> builder::opset1::flatten(const Output<Node>& value, int axis)
 const auto first_part_dims = make_shared<ngraph::opset1::StridedSlice>(
     value_shape,
-    ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}),
+    ngraph::opset1::Constant::create(element::i64, {1}, {0}),
     axis_node,
     vector<int64_t>{},
     vector<int64_t>{});
 const auto first_part_dims_length = make_shared<ngraph::opset1::ReduceProd>(
-    first_part_dims, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}), true);
+    first_part_dims, ngraph::opset1::Constant::create(element::i64, {}, {0}), true);
 const auto remaining_part_length =
-    ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {-1});
+    ngraph::opset1::Constant::create(element::i64, {1}, {-1});
 output_shape = make_shared<ngraph::opset1::Concat>(
     OutputVector{first_part_dims_length, remaining_part_length}, 0);
@@ -230,21 +230,19 @@ shared_ptr<Node> builder::opset1::collapse(const Output<Node>& value,
 const auto rank = make_shared<ngraph::opset1::ShapeOf>(shape);

 // Split lengths used in VariadicSplit
-const auto start_axis_node =
-    ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {start_axis});
-const auto end_axis_node =
-    ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {end_axis + 1});
+const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis});
+const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1});
 const auto collapsed_axis =
     make_shared<ngraph::opset1::Subtract>(end_axis_node, start_axis_node);
 const auto post_axis = make_shared<ngraph::opset1::Subtract>(rank, end_axis_node);

 const auto split_lengths = make_shared<ngraph::opset1::Concat>(
     OutputVector{start_axis_node, collapsed_axis, post_axis}, 0);
-const auto split_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0});
+const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0});
 const auto split_node =
     make_shared<ngraph::opset1::VariadicSplit>(shape, split_axis, split_lengths);

-const auto reduced_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0});
+const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0});
 const auto collapsed_axis_size =
     make_shared<ngraph::opset1::ReduceProd>(split_node->output(1), reduced_axis, true);
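A usage sketch for the reshape-family builders reflowed in this file; the signatures (`reshape(value, shape)`, `flatten(value, axis)`) come from the hunks above, while the include path is an assumption:

```C++
// Sketch of builder::opset1::reshape / flatten under the assumption that
// they are declared in <ngraph/builder/reshape.hpp>.
#include <ngraph/ngraph.hpp>
#include <ngraph/builder/reshape.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});

    // Static reshape to a new shape.
    auto reshaped = builder::opset1::reshape(data, Shape{6, 4});

    // Flatten everything after axis 1 into one dimension: {2, 12}.
    auto flat = builder::opset1::flatten(data, 1);

    auto f = std::make_shared<Function>(OutputVector{reshaped, flat},
                                        ParameterVector{data});
    return 0;
}
```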


@@ -25,9 +25,9 @@ OutputVector builder::opset1::split(const Output<Node>& value,
                                     const std::vector<size_t>& split_lengths,
                                     int64_t axis)
 {
-    const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis});
-    const auto split_lengths_node = ngraph::opset1::Constant::create(
-        element::Type_t::u64, Shape{split_lengths.size()}, split_lengths);
+    const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis});
+    const auto split_lengths_node =
+        ngraph::opset1::Constant::create(element::u64, Shape{split_lengths.size()}, split_lengths);
     const auto variadic_split =
         std::make_shared<ngraph::opset1::VariadicSplit>(value, axis_node, split_lengths_node);
@@ -36,7 +36,7 @@ OutputVector builder::opset1::split(const Output<Node>& value,
 OutputVector builder::opset1::split(const Output<Node>& value, size_t num_splits, int64_t axis)
 {
-    const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis});
+    const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis});
     const auto split = std::make_shared<ngraph::opset1::Split>(value, axis_node, num_splits);
     return split->outputs();
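A sketch of both split overloads shown above, equal split and variadic split; signatures are taken from the hunks, the header path is an assumption:

```C++
// Sketch of builder::opset1::split, assuming it is declared in
// <ngraph/builder/split.hpp>.
#include <ngraph/ngraph.hpp>
#include <ngraph/builder/split.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{6, 4});

    // Three equal parts of shape {2, 4} along axis 0.
    OutputVector equal_parts = builder::opset1::split(data, 3, 0);

    // Variadic split: parts of size 1, 2 and 3 along axis 0.
    OutputVector var_parts = builder::opset1::split(data, std::vector<size_t>{1, 2, 3}, 0);

    auto f = std::make_shared<Function>(equal_parts, ParameterVector{data});
    return 0;
}
```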


@@ -40,7 +40,7 @@ namespace ngraph
 ///                    edge of interval. default true = includes right edge
 Bucketize(const Output<Node>& data,
           const Output<Node>& buckets,
-          const element::Type output_type = element::Type_t::i64,
+          const element::Type output_type = element::i64,
           const bool with_right_bound = true);

 virtual void validate_and_infer_types() override;


@@ -273,31 +273,31 @@ namespace ngraph
 }

 /// \brief Returns the value of the constant node as a Shape object
-///        Can only be used on element::Type_t::i64 nodes and interprets
+///        Can only be used on element::i64 nodes and interprets
 ///        negative values as zeros.
 Shape get_shape_val() const;
 /// \brief Returns the value of the constant node as a Strides
 ///        object
-///        Can only be used on element::Type_t::i64 nodes and interprets
+///        Can only be used on element::i64 nodes and interprets
 ///        negative values as zeros.
 Strides get_strides_val() const;
 /// \brief Returns the value of the constant node as a Coordinate
 ///        object
-///        Can only be used on element::Type_t::i64 nodes and interprets
+///        Can only be used on element::i64 nodes and interprets
 ///        negative values as zeros.
 Coordinate get_coordinate_val() const;
 /// \brief Returns the value of the constant node as a
 ///        CoordinateDiff object
-///        Can only be used on element::Type_t::i64 nodes.
+///        Can only be used on element::i64 nodes.
 CoordinateDiff get_coordinate_diff_val() const;
 /// \brief Returns the value of the constant node as an AxisVector
 ///        object
-///        Can only be used on element::Type_t::i64 nodes and interprets
+///        Can only be used on element::i64 nodes and interprets
 ///        negative values as zeros.
 AxisVector get_axis_vector_val() const;
 /// \brief Returns the value of the constant node as an AxisSet
 ///        object
-///        Can only be used on element::Type_t::i64 nodes and interprets
+///        Can only be used on element::i64 nodes and interprets
 ///        negative values as zeros.
 ///        Repeated values are allowed.
 AxisSet get_axis_set_val() const;
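A minimal sketch of the accessors documented above; per the doc comments they require an `element::i64` constant:

```C++
// Sketch: reading an i64 Constant back out as Shape / AxisVector values.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    auto c = op::Constant::create(element::i64, Shape{3}, {2, 4, 8});

    Shape as_shape = c->get_shape_val();            // Shape{2, 4, 8}
    AxisVector as_axes = c->get_axis_vector_val();  // AxisVector{2, 4, 8}

    // A negative entry would come back as zero, per the "interprets
    // negative values as zeros" note in the doc comments above.
    return 0;
}
```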


@@ -117,7 +117,7 @@ namespace ngraph
 R,
 B,
 Constant::create(
-    element::Type_t::f32,
+    element::f32,
     Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL),
           3UL * static_cast<size_t>(hidden_size)},
     std::vector<float>{0.f}),


@@ -125,15 +125,14 @@ namespace ngraph
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const Output<Node>& iou_threshold,
-    const Output<Node>& score_threshold,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const Output<Node>& iou_threshold,
+                  const Output<Node>& score_threshold,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values for the last
 ///        3 inputs
@@ -144,12 +143,11 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;
@@ -178,7 +176,7 @@
 protected:
     BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
     bool m_sort_result_descending = true;
-    ngraph::element::Type m_output_type = ngraph::element::Type_t::i64;
+    ngraph::element::Type m_output_type = ngraph::element::i64;
     void validate();
     int64_t max_boxes_output_from_input() const;
 };
@@ -207,15 +205,14 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const Output<Node>& iou_threshold,
-    const Output<Node>& score_threshold,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const Output<Node>& iou_threshold,
+                  const Output<Node>& score_threshold,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values for the last
 ///        3 inputs
@@ -226,12 +223,11 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 void validate_and_infer_types() override;
@@ -265,12 +261,11 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values in the last.
 ///        3 inputs.
@@ -283,13 +278,12 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation with default values in the last.
 ///        2 inputs.
@@ -303,14 +297,13 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const Output<Node>& iou_threshold,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const Output<Node>& iou_threshold,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation with default value in the last.
 ///        input.
@@ -325,15 +318,14 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const Output<Node>& iou_threshold,
-    const Output<Node>& score_threshold,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const Output<Node>& iou_threshold,
+                  const Output<Node>& score_threshold,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 /// \brief Constructs a NonMaxSuppression operation.
 ///
@@ -348,16 +340,15 @@
 /// \param sort_result_descending Specifies whether it is necessary to sort selected
 ///                               boxes across batches
 /// \param output_type Specifies the output tensor type
-NonMaxSuppression(
-    const Output<Node>& boxes,
-    const Output<Node>& scores,
-    const Output<Node>& max_output_boxes_per_class,
-    const Output<Node>& iou_threshold,
-    const Output<Node>& score_threshold,
-    const Output<Node>& soft_nms_sigma,
-    const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
-    const bool sort_result_descending = true,
-    const ngraph::element::Type& output_type = ngraph::element::Type_t::i64);
+NonMaxSuppression(const Output<Node>& boxes,
+                  const Output<Node>& scores,
+                  const Output<Node>& max_output_boxes_per_class,
+                  const Output<Node>& iou_threshold,
+                  const Output<Node>& score_threshold,
+                  const Output<Node>& soft_nms_sigma,
+                  const BoxEncodingType box_encoding = BoxEncodingType::CORNER,
+                  const bool sort_result_descending = true,
+                  const ngraph::element::Type& output_type = ngraph::element::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;
@@ -391,7 +382,7 @@
 protected:
     BoxEncodingType m_box_encoding = BoxEncodingType::CORNER;
     bool m_sort_result_descending = true;
-    ngraph::element::Type m_output_type = ngraph::element::Type_t::i64;
+    ngraph::element::Type m_output_type = ngraph::element::i64;
     void validate();
 };
 } // namespace v5
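A usage sketch of the two-input v5 constructor reformatted above, spelling out the defaulted arguments, including the `element::i64` output type this revert restores:

```C++
// Sketch: constructing op::v5::NonMaxSuppression with only the required
// boxes/scores inputs and explicit defaults, per the declaration above.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // boxes: [num_batches, num_boxes, 4]; scores: [num_batches, num_classes, num_boxes]
    auto boxes = std::make_shared<op::Parameter>(element::f32, Shape{1, 10, 4});
    auto scores = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 10});

    auto nms = std::make_shared<op::v5::NonMaxSuppression>(
        boxes,
        scores,
        op::v5::NonMaxSuppression::BoxEncodingType::CORNER,
        true,           // sort_result_descending
        element::i64);  // output_type (the restored default)

    auto f = std::make_shared<Function>(nms->outputs(), ParameterVector{boxes, scores});
    return 0;
}
```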


@@ -74,7 +74,7 @@ namespace ngraph
                            const HostTensorVector& inputs) const override;

 protected:
-    element::Type m_output_type = element::Type_t::i64;
+    element::Type m_output_type = element::i64;
 };
 }
 using v3::NonZero;


@@ -33,8 +33,7 @@ namespace ngraph
 const NodeTypeInfo& get_type_info() const override { return type_info; }
 ScatterNDUpdate() = default;
 /// \param inputs Tensor
-/// \param indices Index tensor: Data type must be `element::Type_t::i32` or
-///                `element::Type_t::i64`
+/// \param indices Index tensor: Data type must be `element::i32` or `element::i64`
 /// \param updates Tensor: Must have same type as inputs
 ScatterNDUpdate(const Output<Node>& inputs,
                 const Output<Node>& indices,


@@ -32,8 +32,7 @@ namespace ngraph
 const NodeTypeInfo& get_type_info() const override { return type_info; }
 ShapeOf() = default;
 /// \brief Constructs a shape-of operation.
-ShapeOf(const Output<Node>& arg,
-        const element::Type output_type = element::Type_t::i64);
+ShapeOf(const Output<Node>& arg, const element::Type output_type = element::i64);

 bool visit_attributes(AttributeVisitor& visitor) override;
 virtual std::shared_ptr<Node>


@@ -57,14 +57,14 @@ namespace ngraph
      const int64_t axis,
      const std::string& mode,
      const std::string& sort,
-     const element::Type& index_element_type = element::Type_t::i32);
+     const element::Type& index_element_type = element::i32);

 TopK(const Output<Node>& data,
      const Output<Node>& k,
      const int64_t axis,
      const Mode mode,
      const SortType sort,
-     const element::Type& index_element_type = element::Type_t::i32);
+     const element::Type& index_element_type = element::i32);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;
@@ -104,7 +104,7 @@
 uint64_t m_normalized_axis;
 Mode m_mode;
 SortType m_sort;
-element::Type m_index_element_type{element::Type_t::i32};
+element::Type m_index_element_type{element::i32};

 virtual size_t read_k_from_constant_node(const std::shared_ptr<Node>& node,
                                          const element::Type& k_element_type) const;
@@ -146,14 +146,14 @@
      const int64_t axis,
      const std::string& mode,
      const std::string& sort,
-     const element::Type& index_element_type = element::Type_t::i32);
+     const element::Type& index_element_type = element::i32);

 TopK(const Output<Node>& data,
      const Output<Node>& k,
      const int64_t axis,
      const Mode mode,
      const SortType sort,
-     const element::Type& index_element_type = element::Type_t::i32);
+     const element::Type& index_element_type = element::i32);

 bool visit_attributes(AttributeVisitor& visitor) override;
 void validate_and_infer_types() override;
 virtual std::shared_ptr<Node>
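A sketch of the enum-based v1 constructor above; `k` is supplied as a constant node and the index element type falls back to the restored `element::i32` default:

```C++
// Sketch: top-3 values along axis 1 with op::v1::TopK, per the
// declaration reformatted above.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 10});
    auto k = op::Constant::create(element::i64, Shape{}, {3});

    auto topk = std::make_shared<op::v1::TopK>(data,
                                               k,
                                               1, // axis
                                               op::v1::TopK::Mode::MAX,
                                               op::v1::TopK::SortType::SORT_VALUES);

    // Output 0: top values; output 1: their indices (element::i32 by default).
    auto f = std::make_shared<Function>(topk->outputs(), ParameterVector{data});
    return 0;
}
```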


@@ -44,7 +44,7 @@ namespace ngraph
 Branch()
     : Pattern(OutputVector{})
 {
-    set_output_type(0, element::Type_t::f32, Shape{});
+    set_output_type(0, element::f32, Shape{});
 }

 void set_destination(const Output<Node>& destination)


@@ -47,7 +47,7 @@ namespace ngraph
 /// Example:
 /// \code{.cpp}
 /// auto add = a + b; // a and b are op::Parameter in this example
-/// auto label = std::make_shared<pattern::op::Label>(element::Type_t::f32,
+/// auto label = std::make_shared<pattern::op::Label>(element::f32,
 ///                                                   Shape{2,2},
 ///                                                   nullptr,
 ///                                                   OutputVector{add});
@@ -61,7 +61,7 @@
     set_output_type(0, type, s);
 }

-explicit Label(const element::Type& type = element::Type_t::dynamic,
+explicit Label(const element::Type& type = element::dynamic,
                const PartialShape& s = PartialShape::dynamic())
     : Label(type, s, [](const Output<Node>&) { return true; }, OutputVector())
 {


@@ -76,12 +76,10 @@ namespace ngraph
 /// because when we reconstruct the new x node, it will see that the shapes are inconsistent
 /// for elementwise add.
 ///
-/// Specialization of element types is also possible: `element::Type_t::dynamic` can be
-/// specialized
+/// Specialization of element types is also possible: `element::dynamic` can be specialized
 /// to a concrete element type or left dynamic; but a concrete element type can only be
-/// specialized to itself (e.g., specialization does not allow you to change
-/// `element::Type_t::i32`
-/// to `element::Type_t::i64`).
+/// specialized to itself (e.g., specialization does not allow you to change `element::i32`
+/// to `element::i64`).
 ///
 /// Finally, it is possible to specialize parameter values. If the ith element of
 /// `parameter_values` is not `nullptr`, and fully static element type and shape has been


@@ -91,6 +91,7 @@ namespace ngraph
 // The name of this type, the enum name of this type
 const std::string& get_type_name() const;
 friend NGRAPH_API std::ostream& operator<<(std::ostream&, const Type&);
+static std::vector<const Type*> get_known_types();

 /// \brief Checks whether this element type is merge-compatible with `t`.
 /// \param t The element type to compare this element type to.
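A sketch exercising the `get_known_types()` declaration restored above; it is assumed to return pointers to the predefined global element types, printable via the `operator<<` declared alongside it:

```C++
// Sketch: enumerating the known element types (an assumption about what
// element::Type::get_known_types() returns, based on its signature above).
#include <iostream>
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    for (const element::Type* t : element::Type::get_known_types())
    {
        std::cout << *t << "\n";
    }
    return 0;
}
```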


@@ -49,8 +49,8 @@ namespace ngraph
     input_descs.size() + (cur_iter_idx >= 0 ? !cur_iter_initial_value_exist : 0);
 HostTensorVector inputs_to_body;
 for (int64_t i = 0; i < inputs_count; ++i)
-    inputs_to_body.push_back(std::make_shared<HostTensor>(element::Type_t::dynamic,
-                                                          PartialShape::dynamic()));
+    inputs_to_body.push_back(
+        std::make_shared<HostTensor>(element::dynamic, PartialShape::dynamic()));
 if (cur_iter_idx >= 0 && !cur_iter_initial_value_exist)
 {
     const auto& cur_iter = func->get_parameters().at(cur_iter_idx);
@@ -90,12 +90,12 @@
 // Get TripCount
 int64_t trip_count = 0;
-if (args[0]->get_element_type() == ngraph::element::Type_t::i32)
+if (args[0]->get_element_type() == ngraph::element::i32)
 {
     auto* trip_count_p = args[0]->get_data_ptr<int32_t>();
     trip_count = trip_count_p[0];
 }
-else if (args[0]->get_element_type() == ngraph::element::Type_t::i64)
+else if (args[0]->get_element_type() == ngraph::element::i64)
 {
     auto* trip_count_p = args[0]->get_data_ptr<int64_t>();
     trip_count = trip_count_p[0];
@@ -204,10 +204,10 @@
 {
     const auto& cur_iter_param = func->get_parameters().at(cur_iter_idx);
     int64_t iter_num = cur_iter + 1;
-    if (cur_iter_param->get_element_type() == element::Type_t::i64)
+    if (cur_iter_param->get_element_type() == element::i64)
         inputs_to_body.at(cur_iter_idx)
             ->write(&iter_num, cur_iter_param->get_element_type().size());
-    else if (cur_iter_param->get_element_type() == element::Type_t::i32)
+    else if (cur_iter_param->get_element_type() == element::i32)
     {
         int32_t iter_num_i32 = static_cast<int32_t>(iter_num);
         inputs_to_body.at(cur_iter_idx)


@@ -326,7 +326,7 @@ namespace ngraph
 size_t selected_size = valid_outputs * 3;

-if (output_type == ngraph::element::Type_t::i64)
+if (output_type == ngraph::element::i64)
 {
     int64_t* indices_ptr = outputs[0]->get_data_ptr<int64_t>();
     memcpy(indices_ptr, selected_indices.data(), selected_size * sizeof(int64_t));
@@ -381,7 +381,7 @@
     return;
 }

-if (output_type == ngraph::element::Type_t::i64)
+if (output_type == ngraph::element::i64)
 {
     int64_t* valid_outputs_ptr = outputs[2]->get_data_ptr<int64_t>();
     *valid_outputs_ptr = valid_outputs;


@@ -35,8 +35,8 @@ namespace ngraph
 {
     HostTensorVector inputs_to_body;
     for (int64_t i = 0; i < input_descs.size(); ++i)
-        inputs_to_body.push_back(std::make_shared<HostTensor>(element::Type_t::dynamic,
-                                                              PartialShape::dynamic()));
+        inputs_to_body.push_back(
+            std::make_shared<HostTensor>(element::dynamic, PartialShape::dynamic()));

     // Port map processing: inputs and back edges
     struct BackEdge


@@ -587,7 +587,7 @@ std::shared_ptr<Node> ngraph::make_zero(const element::Type& element_type, const
 if (shape.size() > 0)
 {
     return std::make_shared<op::v1::Broadcast>(
-        zero, op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape));
+        zero, op::Constant::create(element::u64, Shape{shape.size()}, shape));
 }
 return zero;
 }
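A sketch of the `make_zero` helper whose body is shown above: for a non-scalar shape it broadcasts a scalar zero constant through `op::v1::Broadcast`:

```C++
// Sketch: a {2, 3} tensor of f32 zeros via ngraph::make_zero, matching
// the signature in the hunk above.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    std::shared_ptr<Node> zeros = make_zero(element::f32, Shape{2, 3});

    // A constant-only Function with no parameters.
    auto f = std::make_shared<Function>(OutputVector{zeros}, ParameterVector{});
    return 0;
}
```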


@@ -213,8 +213,8 @@ descriptor::Output& Node::get_output_descriptor(size_t position)
 while (m_outputs.size() <= position)
 {
     size_t i = m_outputs.size();
-    auto tensor_descriptor = make_shared<descriptor::Tensor>(
-        element::Type_t::dynamic, PartialShape::dynamic(), this, i);
+    auto tensor_descriptor =
+        make_shared<descriptor::Tensor>(element::dynamic, PartialShape::dynamic(), this, i);
     m_outputs.emplace_back(this, i, tensor_descriptor);
 }
 return m_outputs.at(position);


@@ -260,7 +260,7 @@ op::v1::Broadcast::Broadcast(const Output<Node>& arg,
                              const AutoBroadcastSpec& broadcast_spec)
     : util::BroadcastBase{arg,
                           target_shape,
-                          op::v0::Constant::create(element::Type_t::u8, Shape{}, {0})->output(0),
+                          op::v0::Constant::create(element::u8, Shape{}, {0})->output(0),
                           to_broadcast_mode(broadcast_spec)}
     , m_broadcast_spec{broadcast_spec}
 {


@@ -45,8 +45,7 @@ void op::v3::Bucketize::validate_and_infer_types()
 const PartialShape& buckets_pshape = get_input_partial_shape(1);

 NODE_VALIDATION_CHECK(this,
-                      m_output_type == element::Type_t::i64 ||
-                          m_output_type == element::Type_t::i32,
+                      m_output_type == element::i64 || m_output_type == element::i32,
                       "Output type must be i32 or i64. Default is i64");

 if (buckets_pshape.is_static())


@@ -50,7 +50,7 @@ void op::Concat::validate_and_infer_types()
 NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required.");

 PartialShape inputs_shape_scheme{PartialShape::dynamic()};
-element::Type inputs_et{element::Type_t::dynamic};
+element::Type inputs_et{element::dynamic};
 Dimension concatenation_axis_output_dim{0};

 for (uint64_t i = 0; i < get_input_size(); i++)


@@ -482,7 +482,7 @@ Shape op::Constant::get_shape_val() const
 Strides op::Constant::get_strides_val() const
 {
-    NGRAPH_CHECK(m_element_type == element::Type_t::i64);
+    NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_strides = cast_vector<int64_t>();
     Strides output_strides(shape_size(m_shape));
     std::transform(out_strides.begin(),
@@ -494,7 +494,7 @@ Strides op::Constant::get_strides_val() const
 Coordinate op::Constant::get_coordinate_val() const
 {
-    NGRAPH_CHECK(m_element_type == element::Type_t::i64);
+    NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_coordinate = cast_vector<int64_t>();
     Coordinate output_coordinate(shape_size(m_shape));
     std::transform(out_coordinate.begin(),
@@ -506,7 +506,7 @@ Coordinate op::Constant::get_coordinate_val() const
 CoordinateDiff op::Constant::get_coordinate_diff_val() const
 {
-    NGRAPH_CHECK(m_element_type == element::Type_t::i64);
+    NGRAPH_CHECK(m_element_type == element::i64);
     std::vector<int64_t> out_coordinate_diff = cast_vector<int64_t>();
     CoordinateDiff output_coordinate_diff(shape_size(m_shape));
     std::transform(out_coordinate_diff.begin(),


@@ -37,7 +37,7 @@ op::v0::CumSum::CumSum(const Output<Node>& arg,
 }

 op::v0::CumSum::CumSum(const Output<Node>& arg, const bool exclusive, const bool reverse)
-    : Op({arg, op::Constant::create(element::Type_t::i32, Shape{}, {0})})
+    : Op({arg, op::Constant::create(element::i32, Shape{}, {0})})
     , m_exclusive(exclusive)
     , m_reverse(reverse)
 {
@@ -65,7 +65,7 @@ void op::v0::CumSum::validate_and_infer_types()
 const auto& axis_type = get_input_element_type(1);
 NODE_VALIDATION_CHECK(this,
-                      axis_type == element::Type_t::i32 || axis_type == element::Type_t::i64,
+                      axis_type == element::i32 || axis_type == element::i64,
                       "axis element type must be either int64_t or int32_t but got (",
                       axis_type,
                       ").");

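A sketch of the delegating constructor shown above: when no axis input is given, CumSum supplies a scalar `element::i32` zero constant as the axis.

```C++
// Sketch: op::v0::CumSum with the defaulted axis input, per the
// constructor in the hunk above.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{4, 5});

    // Equivalent to CumSum(data, Constant::create(element::i32, Shape{}, {0}), ...).
    auto cumsum =
        std::make_shared<op::v0::CumSum>(data, /*exclusive=*/false, /*reverse=*/false);

    auto f = std::make_shared<Function>(OutputVector{cumsum}, ParameterVector{data});
    return 0;
}
```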

@@ -49,11 +49,11 @@ void op::DetectionOutput::validate_and_infer_types()
 {
     auto box_logits_shape = get_input_partial_shape(0).to_shape();
     set_output_type(
-        0, element::Type_t::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7});
+        0, element::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7});
 }
 else
 {
-    set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
+    set_output_type(0, element::f32, PartialShape::dynamic());
 }
 }


@@ -56,18 +56,18 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output<Node>& emb_table
 void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
 {
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(SEGMENT_IDS) == element::Type_t::i64 ||
-                              get_input_element_type(SEGMENT_IDS) == element::Type_t::i32,
+                          get_input_element_type(SEGMENT_IDS) == element::i64 ||
+                              get_input_element_type(SEGMENT_IDS) == element::i32,
                           "SEGMENT_IDS type must be i32 or i64");

     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(INDICES) == element::Type_t::i64 ||
-                              get_input_element_type(INDICES) == element::Type_t::i32,
+                          get_input_element_type(INDICES) == element::i64 ||
+                              get_input_element_type(INDICES) == element::i32,
                           "INDICES type must be i32 or i64");

     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(NUM_SEGMENTS) == element::Type_t::i64 ||
-                              get_input_element_type(NUM_SEGMENTS) == element::Type_t::i32,
+                          get_input_element_type(NUM_SEGMENTS) == element::i64 ||
+                              get_input_element_type(NUM_SEGMENTS) == element::i32,
                           "NUM_SEGMENTS type must be i32 or i64");

     NODE_VALIDATION_CHECK(
@@ -110,8 +110,8 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types()
 if (get_input_size() >= 5)
 {
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 ||
-                              get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32,
+                          get_input_element_type(DEFAULT_INDEX) == element::i64 ||
+                              get_input_element_type(DEFAULT_INDEX) == element::i32,
                           "DEFAULT_INDEX type must be i32 or i64");

     NODE_VALIDATION_CHECK(


@@ -47,7 +47,7 @@ namespace equal
                   const op::AutoBroadcastSpec& broadcast_spec)
 {
     bool rc = true;
-    out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+    out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
     switch (arg0->get_element_type())
     {
         TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -136,7 +136,7 @@ OutputVector op::FakeQuantize::decompose_op() const
     std::make_shared<op::v1::Subtract>(output_high, output_low), levels_minus_one);

 // zero_point type needs to match the quantization output type
-const auto zero_point = Constant::create(element::Type_t::i32, data.get_shape(), {0.0});
+const auto zero_point = Constant::create(element::i32, data.get_shape(), {0.0});
 const auto axes = get_default_order(input_data_shape);

 // clip the input data to the range <input_low;input_high>
@@ -150,7 +150,7 @@
 make_shared<op::Quantize>(data,
                           quant_scale,
                           zero_point,
-                          element::Type_t::i32,
+                          element::i32,
                           axes,
                           op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);


@@ -167,7 +167,7 @@ namespace gather
 out->set_shape(out_shape);

-if (arg1->get_element_type() == element::Type_t::i64)
+if (arg1->get_element_type() == element::i64)
 {
     runtime::reference::gather<T, int64_t>(arg0->get_data_ptr<ET>(),
                                            arg1->get_data_ptr<int64_t>(),
@@ -177,7 +177,7 @@ namespace gather
                                            out->get_shape(),
                                            axis);
 }
-else if (arg1->get_element_type() == element::Type_t::i32)
+else if (arg1->get_element_type() == element::i32)
 {
     runtime::reference::gather<T, int32_t>(arg0->get_data_ptr<ET>(),
                                            arg1->get_data_ptr<int32_t>(),
@@ -280,7 +280,7 @@ namespace gather
 if (indices_shape.empty())
 {
     // gathering a scalar
-    const auto axes = op::Constant::create(element::Type_t::i64, Shape{1}, {0});
+    const auto axes = op::Constant::create(element::i64, Shape{1}, {0});
     gathered = make_shared<op::v0::Squeeze>(gathered_concat_input, axes);
 }


@@ -47,7 +47,7 @@ namespace greaterop
                   const op::AutoBroadcastSpec& broadcast_spec)
 {
     bool rc = true;
-    out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+    out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
     switch (arg0->get_element_type())
     {
         TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -47,7 +47,7 @@ namespace greater_equalop
                   const op::AutoBroadcastSpec& broadcast_spec)
 {
     bool rc = true;
-    out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+    out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
     switch (arg0->get_element_type())
     {
         TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -78,7 +78,7 @@ OutputVector op::GRN::decompose_op() const
     data = builder::opset1::reshape(data, data_shape);
 }

-const auto axis_set_const = op::Constant::create(element::Type_t::i64, {}, {1});
+const auto axis_set_const = op::Constant::create(element::i64, {}, {1});
 // Calculate l2 norm across channels.
 shared_ptr<Node> norm = builder::opset1::l2_norm(data, axis_set_const, m_bias);
 // Get back reduced axis.


@@ -119,7 +119,7 @@ void op::v3::GRUCell::validate_and_infer_types()
 }
 auto merged_batch_size = Dimension::dynamic();
 auto merged_hidden_size = Dimension::dynamic();
-element::Type result_et = element::Type_t::dynamic;
+auto result_et = element::dynamic;

 // Get input partial shape for all inputs
 const auto& x_pshape = get_input_partial_shape(0);


@@ -74,7 +74,7 @@ void op::v5::GRUSequence::validate_and_infer_types()
 auto merged_batch_size = Dimension::dynamic();
 auto merged_hidden_size = Dimension::dynamic();
 auto merged_num_directions = Dimension::dynamic();
-element::Type result_et = element::Type_t::dynamic;
+auto result_et = element::dynamic;

 auto x_pshape = get_input_partial_shape(0);
 auto ht_pshape = get_input_partial_shape(1);


@@ -221,8 +221,8 @@ void op::v4::Interpolate::validate_and_infer_types()
 {
     element::Type input_et = get_input_element_type(0);
     NODE_VALIDATION_CHECK(this,
-                          input_et == element::Type_t::f32 || input_et == element::Type_t::f16 ||
-                              input_et == element::Type_t::i8 || input_et == element::Type_t::bf16,
+                          input_et == element::f32 || input_et == element::f16 ||
+                              input_et == element::i8 || input_et == element::bf16,
                           "Input element type must be f32, f16, bf16 or i8");

     PartialShape input_shape = PartialShape(get_input_partial_shape(0));


@@ -47,7 +47,7 @@ namespace lessop
                   const op::AutoBroadcastSpec& broadcast_spec)
 {
     bool rc = true;
-    out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+    out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
     switch (arg0->get_element_type())
     {
         TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -65,7 +65,7 @@ namespace less_equalop
                       const op::AutoBroadcastSpec& broadcast_spec)
     {
         bool rc = true;
-        out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+        out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
         switch (arg0->get_element_type())
         {
             TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -25,7 +25,7 @@ using namespace ngraph;
 constexpr NodeTypeInfo op::LRN::type_info;
 op::LRN::LRN(const Output<Node>& arg, double alpha, double beta, double bias, size_t size)
-    : LRN(arg, op::Constant::create(element::Type_t::i64, Shape{1}, {1}), alpha, beta, bias, size)
+    : LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size)
 {
     add_provenance_group_member(input_value(1).get_node_shared_ptr());
 }


@@ -156,7 +156,7 @@ void op::v0::LSTMCell::validate_and_infer_types()
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     // Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information
     // for further validation
@@ -457,7 +457,7 @@ void op::v4::LSTMCell::validate_and_infer_types()
     }
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     // Get input partial shape for all inputs
     const auto& x_pshape = get_input_partial_shape(0);


@@ -131,10 +131,8 @@ shared_ptr<Node> op::v0::LSTMSequence::get_masked_node(const Output<Node>& data,
     // Create predicate nodes. The condition is whether current time step value
     // is greater than sequence length for respective batch inputs.
-    shared_ptr<Node> curr_time_step_node =
-        opset1::Constant::create(element::Type_t::i32,
-                                 data.get_shape(),
-                                 vector<int32_t>(shape_size(data.get_shape()), time_step));
+    shared_ptr<Node> curr_time_step_node = opset1::Constant::create(
+        element::i32, data.get_shape(), vector<int32_t>(shape_size(data.get_shape()), time_step));
     Output<Node> batch_seq_length = builder::opset1::legacy_broadcast_for_binary_operation(
         curr_time_step_node, input_value(3).get_node_shared_ptr(), batch_axis);
@@ -272,7 +270,7 @@ void op::v0::LSTMSequence::validate_and_infer_types()
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
     auto merged_num_directions = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     // Copy all inputs without peephole and initial_cell_state information for further validation
     for (size_t i = 0; i < get_input_size() - 1; i++)
@@ -470,7 +468,7 @@ void op::v5::LSTMSequence::validate_and_infer_types()
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
     auto merged_num_directions = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     // Copy all inputs without initial_cell_state information for further validation
     for (size_t i = 0; i < get_input_size(); i++)


@@ -52,9 +52,8 @@ OutputVector op::v1::Mod::decompose_op() const
     const auto divisor = make_shared<op::Abs>(input_value(1));
     // truncated(a / b)
-    auto division =
-        make_shared<op::Convert>(make_shared<op::v1::Divide>(dividend, divisor, m_auto_broadcast),
-                                 ngraph::element::Type_t::i64);
+    auto division = make_shared<op::Convert>(
+        make_shared<op::v1::Divide>(dividend, divisor, m_auto_broadcast), ngraph::element::i64);
     division = make_shared<op::Convert>(division, dividend_et);
     // truncated(a / b) * b
     const auto multiplication = make_shared<op::v1::Multiply>(division, divisor, m_auto_broadcast);
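The surrounding decomposition computes a truncated modulo: the float quotient is truncated by converting it to i64 and back. A scalar walk-through with hypothetical values (the sign handling that brackets this hunk is omitted):

```C++
#include <cassert>
#include <cstdint>

int main()
{
    float a = 7.f, b = 2.f;
    // truncated(a / b): converting the float quotient to i64 rounds toward zero.
    int64_t truncated = static_cast<int64_t>(a / b); // 3.5 -> 3
    // a - truncated(a / b) * b
    float mod = a - static_cast<float>(truncated) * b;
    assert(mod == 1.f); // 7 mod 2 == 1
    return 0;
}
```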


@@ -52,9 +52,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression(
     const bool sort_result_descending)
     : Op({boxes,
           scores,
-          op::Constant::create(element::Type_t::i64, Shape{}, {0}),
-          op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
-          op::Constant::create(element::Type_t::f32, Shape{}, {.0f})})
+          op::Constant::create(element::i64, Shape{}, {0}),
+          op::Constant::create(element::f32, Shape{}, {.0f}),
+          op::Constant::create(element::f32, Shape{}, {.0f})})
     , m_box_encoding{box_encoding}
     , m_sort_result_descending{sort_result_descending}
 {
@@ -71,13 +71,13 @@ std::shared_ptr<Node>
     const auto& arg2 = new_args.size() > 2
                            ? new_args.at(2)
-                           : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
+                           : ngraph::op::Constant::create(element::i32, Shape{}, {0});
     const auto& arg3 = new_args.size() > 3
                            ? new_args.at(3)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     const auto& arg4 = new_args.size() > 4
                            ? new_args.at(4)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     return std::make_shared<op::v1::NonMaxSuppression>(
         new_args.at(0), new_args.at(1), arg2, arg3, arg4, m_box_encoding, m_sort_result_descending);
@@ -98,7 +98,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types()
     // the spec doesn't say what exact type should be used for the output of this op
     // that's why we're setting it to 64-bit integer to provide the maximum range of values support
     // this will be changed (configurable) in the next version of this op
-    const auto& output_element_type = element::Type_t::i64;
+    const auto& output_element_type = element::i64;
     // NonMaxSuppression produces triplets
     // that have the following format: [batch_index, class_index, box_index]
@@ -249,9 +249,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression(
     const element::Type& output_type)
     : Op({boxes,
           scores,
-          op::Constant::create(element::Type_t::i64, Shape{}, {0}),
-          op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
-          op::Constant::create(element::Type_t::f32, Shape{}, {.0f})})
+          op::Constant::create(element::i64, Shape{}, {0}),
+          op::Constant::create(element::f32, Shape{}, {.0f}),
+          op::Constant::create(element::f32, Shape{}, {.0f})})
     , m_box_encoding{box_encoding}
     , m_sort_result_descending{sort_result_descending}
     , m_output_type{output_type}
@@ -269,13 +269,13 @@ std::shared_ptr<Node>
     const auto& arg2 = new_args.size() > 2
                            ? new_args.at(2)
-                           : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
+                           : ngraph::op::Constant::create(element::i32, Shape{}, {0});
     const auto& arg3 = new_args.size() > 3
                            ? new_args.at(3)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     const auto& arg4 = new_args.size() > 4
                            ? new_args.at(4)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     return std::make_shared<op::v3::NonMaxSuppression>(new_args.at(0),
                                                        new_args.at(1),
@@ -301,8 +301,7 @@ void op::v3::NonMaxSuppression::validate()
     const auto scores_ps = get_input_partial_shape(1);
     NODE_VALIDATION_CHECK(this,
-                          m_output_type == element::Type_t::i64 ||
-                              m_output_type == element::Type_t::i32,
+                          m_output_type == element::i64 || m_output_type == element::i32,
                           "Output type must be i32 or i64");
     if (boxes_ps.is_dynamic() || scores_ps.is_dynamic())
@@ -469,9 +468,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression(
     const element::Type& output_type)
     : op::v3::NonMaxSuppression(boxes,
                                 scores,
-                                op::Constant::create(element::Type_t::i64, Shape{}, {0}),
-                                op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
-                                op::Constant::create(element::Type_t::f32, Shape{}, {.0f}),
+                                op::Constant::create(element::i64, Shape{}, {0}),
+                                op::Constant::create(element::f32, Shape{}, {.0f}),
+                                op::Constant::create(element::f32, Shape{}, {.0f}),
                                 box_encoding,
                                 sort_result_descending,
                                 output_type)
@@ -489,13 +488,13 @@ std::shared_ptr<Node>
     const auto& arg2 = new_args.size() > 2
                            ? new_args.at(2)
-                           : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0});
+                           : ngraph::op::Constant::create(element::i32, Shape{}, {0});
     const auto& arg3 = new_args.size() > 3
                            ? new_args.at(3)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     const auto& arg4 = new_args.size() > 4
                            ? new_args.at(4)
-                           : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+                           : ngraph::op::Constant::create(element::f32, Shape{}, {.0f});
     return std::make_shared<op::v4::NonMaxSuppression>(new_args.at(0),
                                                        new_args.at(1),
@@ -694,7 +693,7 @@ namespace
     inline bool is_float_type_admissible(const element::Type& t)
     {
-        return t == element::Type_t::f32 || t == element::Type_t::f16 || t == element::Type_t::bf16;
+        return t == element::f32 || t == element::f16 || t == element::bf16;
     }
     inline bool is_scalar_or_1d_tensor_with_1_element(const PartialShape& p)
@@ -716,8 +715,7 @@ void op::v5::NonMaxSuppression::validate()
     const auto scores_ps = get_input_partial_shape(1);
     NODE_VALIDATION_CHECK(this,
-                          m_output_type == element::Type_t::i64 ||
-                              m_output_type == element::Type_t::i32,
+                          m_output_type == element::i64 || m_output_type == element::i32,
                           "Output type must be i32 or i64");
     if (boxes_ps.is_dynamic() || scores_ps.is_dynamic())
@@ -922,7 +920,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types()
     }
     set_output_type(0, m_output_type, out_shape);
-    set_output_type(1, element::Type_t::f32, out_shape);
+    set_output_type(1, element::f32, out_shape);
     set_output_type(2, m_output_type, Shape{1});
 }


@@ -62,8 +62,7 @@ void op::v3::NonZero::validate_and_infer_types()
                           "NonZero input data type needs to be a numeric type. Got: ",
                           input_et);
     NODE_VALIDATION_CHECK(this,
-                          m_output_type == element::Type_t::i64 ||
-                              m_output_type == element::Type_t::i32,
+                          m_output_type == element::i64 || m_output_type == element::i32,
                           "Output type must be i32 or i64");
     // For scalar non-zero value case, onnx test case expects output shape {1, 1}


@@ -47,7 +47,7 @@ namespace not_equalop
                       const op::AutoBroadcastSpec& broadcast_spec)
     {
         bool rc = true;
-        out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean);
+        out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean);
         switch (arg0->get_element_type())
         {
             TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);


@@ -72,14 +72,14 @@ void op::PriorBox::validate_and_infer_types()
         auto layer_shape = const_shape->get_shape_val();
         set_output_type(0,
-                        element::Type_t::f32,
+                        element::f32,
                         Shape{2,
                               4 * layer_shape[0] * layer_shape[1] *
                                   static_cast<size_t>(number_of_priors(m_attrs))});
     }
     else
     {
-        set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
+        set_output_type(0, element::f32, PartialShape::dynamic());
     }
 }


@@ -80,11 +80,11 @@ void op::PriorBoxClustered::validate_and_infer_types()
         // {Prior boxes, variances-adjusted prior boxes}
         const auto num_priors = m_attrs.widths.size();
         set_output_type(
-            0, element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
+            0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
     }
     else
     {
-        set_output_type(0, element::Type_t::f32, PartialShape::dynamic());
+        set_output_type(0, element::f32, PartialShape::dynamic());
     }
 }


@@ -363,7 +363,7 @@ void op::v0::Range::validate_and_infer_types()
     set_input_is_relevant_to_shape(1);
     set_input_is_relevant_to_shape(2);
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     NODE_VALIDATION_CHECK(
         this,
@@ -373,7 +373,7 @@ void op::v0::Range::validate_and_infer_types()
         "Element types for start, stop, and step do not match.");
     NODE_VALIDATION_CHECK(this,
-                          result_et != element::Type_t::boolean,
+                          result_et != element::boolean,
                           "Element type for start, stop, and step, must not be boolean.");
     NODE_VALIDATION_CHECK(


@@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs,
     const auto& axes = inputs[1];
     const auto& out = outputs[0];
-    if (data->get_element_type() != element::Type_t::boolean ||
+    if (data->get_element_type() != element::boolean ||
         !axes->get_element_type().is_integral_number())
     {
         return false;


@@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs,
     const auto& axes = inputs[1];
     const auto& out = outputs[0];
-    if (data->get_element_type() != element::Type_t::boolean ||
+    if (data->get_element_type() != element::boolean ||
         !axes->get_element_type().is_integral_number())
     {
         return false;


@@ -59,7 +59,7 @@ void op::v1::Reverse::validate_and_infer_types()
     if (m_mode == Mode::MASK)
     {
         NODE_VALIDATION_CHECK(this,
-                              get_input_element_type(1) == element::Type_t::boolean,
+                              get_input_element_type(1) == element::boolean,
                               "In 'mask' mode the second input must contain boolean values.");
     }


@@ -92,7 +92,7 @@ void op::v0::RNNCell::validate_and_infer_types()
     }
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     // Get input partial shape for all inputs
     const auto& x_pshape = get_input_partial_shape(0);


@@ -71,7 +71,7 @@ void op::v5::RNNSequence::validate_and_infer_types()
     auto merged_batch_size = Dimension::dynamic();
     auto merged_hidden_size = Dimension::dynamic();
     auto merged_num_directions = Dimension::dynamic();
-    element::Type result_et = element::Type_t::dynamic;
+    auto result_et = element::dynamic;
     auto x_pshape = get_input_partial_shape(0);
     auto ht_pshape = get_input_partial_shape(1);


@@ -46,7 +46,7 @@ void op::v1::Select::validate_and_infer_types()
     // Condition element type check
     NODE_VALIDATION_CHECK(this,
                           get_input_element_type(0).is_dynamic() ||
-                              get_input_element_type(0) == element::Type_t::boolean,
+                              get_input_element_type(0) == element::boolean,
                           "Argument 0 must have boolean element type (element type: ",
                           get_input_element_type(0),
                           ").");


@@ -42,8 +42,7 @@ op::v3::ShapeOf::ShapeOf(const Output<Node>& arg, element::Type output_type)
 void op::v3::ShapeOf::validate_and_infer_types()
 {
     NODE_VALIDATION_CHECK(this,
-                          m_output_type == element::Type_t::i64 ||
-                              m_output_type == element::Type_t::i32,
+                          m_output_type == element::i64 || m_output_type == element::i32,
                           "Output type must be i32 or i64");
     set_input_is_relevant_to_value(0, false);
     set_output_type(0, m_output_type, PartialShape{get_input_partial_shape(0).rank()});
@@ -142,7 +141,7 @@ namespace shape_of
             auto index = std::make_shared<op::v0::Constant>(
                 output_type, Shape{1}, std::vector<int64_t>{i});
             auto axis = std::make_shared<op::v0::Constant>(
-                element::Type_t::i64, Shape{}, std::vector<int64_t>{0});
+                element::i64, Shape{}, std::vector<int64_t>{0});
             auto temp = make_shared<op::v1::Gather>(shape_of, index, axis);
             temp->set_friendly_name("DynDim/" + temp->get_name());
             dimensions.push_back(temp);
@@ -183,7 +182,7 @@ op::v0::ShapeOf::ShapeOf(const Output<Node>& arg)
 void op::v0::ShapeOf::validate_and_infer_types()
 {
     set_input_is_relevant_to_value(0, false);
-    set_output_type(0, element::Type_t::i64, PartialShape{get_input_partial_shape(0).rank()});
+    set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()});
 }
 bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor)


@@ -126,7 +126,7 @@ OutputVector op::Squeeze::decompose_op() const
     auto output_data_shape = get_output_shape(0);
     return {make_shared<op::v1::Reshape>(
         data,
-        op::Constant::create(element::Type_t::u64, {output_data_shape.size()}, output_data_shape),
+        op::Constant::create(element::u64, {output_data_shape.size()}, output_data_shape),
         false)};
 }


@@ -77,13 +77,12 @@ namespace
         {
             NGRAPH_CHECK(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1,
                          "Begin input must be 1D");
-            return std::make_shared<op::v1::Broadcast>(
-                op::Constant::create(element::Type_t::i64, {}, {1}),
-                std::make_shared<op::ShapeOf>(begin));
+            return std::make_shared<op::v1::Broadcast>(op::Constant::create(element::i64, {}, {1}),
+                                                       std::make_shared<op::ShapeOf>(begin));
         }
         return op::Constant::create(
-            element::Type_t::i64, Shape{strides_length}, vector<int64_t>(strides_length, 1));
+            element::i64, Shape{strides_length}, vector<int64_t>(strides_length, 1));
     }
 }


@@ -320,9 +320,8 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr<Node>& node,
                                                const element::Type& k_element_type) const
 {
     NODE_VALIDATION_CHECK(this,
-                          k_element_type == element::Type_t::i8 ||
-                              k_element_type == element::Type_t::i32 ||
-                              k_element_type == element::Type_t::i64,
+                          k_element_type == element::i8 || k_element_type == element::i32 ||
+                              k_element_type == element::i64,
                           "K input element type must be i8, i32 or i64 (got ",
                           k_element_type,
                           ").");
@@ -401,7 +400,7 @@ size_t op::v1::TopK::get_k() const
 void op::v1::TopK::set_k(size_t k)
 {
     this->input(1).replace_source_output(
-        op::Constant::create(element::Type_t::i64, Shape{}, {k})->output(0));
+        op::Constant::create(element::i64, Shape{}, {k})->output(0));
 }
 bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
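`set_k` works by splicing a fresh scalar i64 constant into input 1, so `k` can be changed after construction. A usage sketch (the string mode/sort constructor overload is assumed here):

```C++
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{8, 16});
    auto k = op::Constant::create(element::i64, Shape{}, {3});
    // axis = 1, largest values first, sorted by value.
    auto topk = std::make_shared<op::v1::TopK>(data, k, 1, "max", "value");
    topk->set_k(5); // replaces input 1 with a new scalar i64 constant
    return 0;
}
```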


@@ -29,7 +29,7 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output<Node>& arg,
                                                    const AxisSet& reduction_axes)
     : Op({arg,
           op::Constant::create(
-              element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
+              element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
               ->output(0)})
 {
     add_provenance_group_member(input_value(1).get_node_shared_ptr());
@@ -62,10 +62,9 @@ const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const
 void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_axes)
 {
-    this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64,
-                                                              Shape{reduction_axes.size()},
-                                                              reduction_axes.to_vector())
-                                             ->output(0));
+    this->input(1).replace_source_output(
+        op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
+            ->output(0));
 }
 void op::util::ArithmeticReduction::validate_and_infer_types()
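Every concrete reduction built on this base class carries its axes as that i64 constant on input 1. For instance (a sketch using `op::v1::ReduceSum` from the public opset):

```C++
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 3, 4});
    // Reduction axes travel as an i64 constant on input 1.
    auto axes = op::Constant::create(element::i64, Shape{2}, std::vector<int64_t>{0, 2});
    auto sum = std::make_shared<op::v1::ReduceSum>(data, axes, false /*keep_dims*/);
    NGRAPH_CHECK(sum->get_output_shape(0) == (Shape{3}));
    return 0;
}
```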


@@ -44,7 +44,7 @@ void op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_arith
     PartialShape& args_pshape = std::get<1>(args_et_pshape);
     NODE_VALIDATION_CHECK(this,
-                          args_et.is_dynamic() || args_et != element::Type_t::boolean,
+                          args_et.is_dynamic() || args_et != element::boolean,
                           "Arguments cannot have boolean element type (argument element type: ",
                           args_et,
                           ").");


@@ -39,7 +39,7 @@ void op::util::BinaryElementwiseComparison::validate_and_infer_types()
     auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob);
     PartialShape& args_pshape = std::get<1>(args_et_pshape);
-    set_output_type(0, element::Type_t::boolean, args_pshape);
+    set_output_type(0, element::boolean, args_pshape);
 }
 bool op::util::BinaryElementwiseComparison::visit_attributes(AttributeVisitor& visitor)


@@ -44,12 +44,12 @@ void op::util::BinaryElementwiseLogical::validate_and_infer_elementwise_logical(
     NODE_VALIDATION_CHECK(
         this,
-        args_et.is_dynamic() || args_et == element::Type_t::boolean,
+        args_et.is_dynamic() || args_et == element::boolean,
         "Operands for logical operators must have boolean element type but have element type ",
         args_et,
         ".");
-    set_output_type(0, element::Type_t::boolean, args_pshape);
+    set_output_type(0, element::boolean, args_pshape);
 }
 void op::util::BinaryElementwiseLogical::validate_and_infer_types()


@@ -52,13 +52,13 @@ op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output<Node>& e
 void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types()
 {
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(OFFSETS) == element::Type_t::i64 ||
-                              get_input_element_type(OFFSETS) == element::Type_t::i32,
+                          get_input_element_type(OFFSETS) == element::i64 ||
+                              get_input_element_type(OFFSETS) == element::i32,
                           "OFFSETS type must be i32 or i64");
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(INDICES) == element::Type_t::i64 ||
-                              get_input_element_type(INDICES) == element::Type_t::i32,
+                          get_input_element_type(INDICES) == element::i64 ||
+                              get_input_element_type(INDICES) == element::i32,
                           "INDICES type must be i32 or i64");
     NODE_VALIDATION_CHECK(
@@ -83,8 +83,8 @@ void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types()
     if (get_input_size() >= 4)
     {
         NODE_VALIDATION_CHECK(this,
-                              get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 ||
-                                  get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32,
+                              get_input_element_type(DEFAULT_INDEX) == element::i64 ||
+                                  get_input_element_type(DEFAULT_INDEX) == element::i32,
                               "DEFAULT_INDEX type must be i32 or i64");
         NODE_VALIDATION_CHECK(


@@ -40,8 +40,8 @@ op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output<Node>& emb
 void op::util::EmbeddingBagPackedBase::validate_and_infer_types()
 {
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(INDICES) == element::Type_t::i64 ||
-                              get_input_element_type(INDICES) == element::Type_t::i32,
+                          get_input_element_type(INDICES) == element::i64 ||
+                              get_input_element_type(INDICES) == element::i32,
                           "INDICES type must be i32 or i64");
     NODE_VALIDATION_CHECK(this,


@@ -68,8 +68,8 @@ void op::util::IndexReduction::validate_and_infer_types()
                           rank,
                           ").");
     NODE_VALIDATION_CHECK(this,
-                          m_index_element_type == element::Type_t::i32 ||
-                              m_index_element_type == element::Type_t::i64,
+                          m_index_element_type == element::i32 ||
+                              m_index_element_type == element::i64,
                           "Index element is neither i64 or i32.");
     PartialShape output_shape{PartialShape::dynamic()};


@@ -28,7 +28,7 @@ op::util::LogicalReduction::LogicalReduction()
 op::util::LogicalReduction::LogicalReduction(const Output<Node>& arg, const AxisSet& reduction_axes)
     : Op({arg,
           op::Constant::create(
-              element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
+              element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
               ->output(0)})
 {
     add_provenance_group_member(input_value(1).get_node_shared_ptr());
@@ -57,10 +57,9 @@ const AxisSet op::util::LogicalReduction::get_reduction_axes() const
 void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axes)
 {
-    this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64,
-                                                              Shape{reduction_axes.size()},
-                                                              reduction_axes.to_vector())
-                                             ->output(0));
+    this->input(1).replace_source_output(
+        op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector())
+            ->output(0));
 }
 void op::util::LogicalReduction::validate_and_infer_types()
@@ -112,8 +111,8 @@ void op::util::LogicalReduction::validate_and_infer_types()
     set_input_is_relevant_to_shape(1);
     NODE_VALIDATION_CHECK(this,
-                          get_input_element_type(0).compatible(element::Type_t::boolean),
+                          get_input_element_type(0).compatible(element::boolean),
                           "Input element type must be boolean.");
-    set_output_type(0, element::Type_t::boolean, result_shape);
+    set_output_type(0, element::boolean, result_shape);
 }


@@ -46,7 +46,7 @@ std::shared_ptr<Node> ngraph::op::util::convert_lstm_node_format(const Output<No
     const auto& to = gate_order_map.at(to_format);
     size_t num_gates = 4;
-    auto axis_const = std::make_shared<opset4::Constant>(element::Type_t::i64, Shape{}, axis);
+    auto axis_const = std::make_shared<opset4::Constant>(element::i64, Shape{}, axis);
     OutputVector splitted_node =
         std::make_shared<opset4::Split>(node, axis_const, num_gates)->outputs();
     OutputVector nodes_in_new_format(num_gates);


@@ -50,7 +50,7 @@ void op::util::ScatterNDBase::validate_and_infer_types()
     const PartialShape& updates_shape = get_input_partial_shape(UPDATES);
     NODE_VALIDATION_CHECK(this,
-                          indices_et == element::Type_t::i32 || indices_et == element::Type_t::i64,
+                          indices_et == element::i32 || indices_et == element::i64,
                           "Indices element type must be i64 or i32");
     NODE_VALIDATION_CHECK(


@@ -36,7 +36,7 @@ void op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithm
     PartialShape& args_pshape = std::get<1>(args_et_pshape);
     NODE_VALIDATION_CHECK(this,
-                          args_et.is_dynamic() || args_et != element::Type_t::boolean,
+                          args_et.is_dynamic() || args_et != element::boolean,
                           "Arguments cannot have boolean element type (argument element type: ",
                           args_et,
                           ").");


@@ -25,8 +25,8 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertFP32ToFP16, "ConvertFP32ToFP16", 0);
 void pass::ConvertFP32ToFP16::convert_constants_precision()
 {
-    auto constant = std::make_shared<ngraph::op::Constant>(
-        element::Type_t::f32, Shape{1}, std::vector<float>{0});
+    auto constant =
+        std::make_shared<ngraph::op::Constant>(element::f32, Shape{1}, std::vector<float>{0});
     ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
         auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(m.get_match_root());
@@ -35,7 +35,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()
             return false;
         }
-        if (constant->get_element_type() == element::Type_t::f32)
+        if (constant->get_element_type() == element::f32)
         {
             auto data = constant->get_vector<float>();
             std::vector<ngraph::float16> new_data(data.size());
@@ -44,7 +44,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()
                 new_data[i] = ngraph::float16(data[i]);
             }
             auto new_const = std::make_shared<ngraph::op::Constant>(
-                element::Type_t::f16, constant->get_shape(), new_data);
+                element::f16, constant->get_shape(), new_data);
             new_const->set_friendly_name(constant->get_friendly_name());
             ngraph::replace_node(constant, new_const);
             return true;
@@ -60,13 +60,13 @@ void pass::ConvertFP32ToFP16::convert_constants_precision()
 void pass::ConvertFP32ToFP16::convert_parameters_precision()
 {
-    auto constant = std::make_shared<ngraph::op::Parameter>(element::Type_t::f32, Shape{1});
+    auto constant = std::make_shared<ngraph::op::Parameter>(element::f32, Shape{1});
     ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
         auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(m.get_match_root());
-        if (parameter && parameter->get_element_type() == element::Type_t::f32)
+        if (parameter && parameter->get_element_type() == element::f32)
         {
-            parameter->set_element_type(element::Type_t::f16);
+            parameter->set_element_type(element::f16);
             return true;
         }
         return false;
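For context, the pass is normally driven through the pass manager; a usage sketch (assumes an already-built `std::shared_ptr<Function> f`):

```C++
#include <ngraph/ngraph.hpp>
#include <ngraph/pass/convert_fp32_to_fp16.hpp>
#include <ngraph/pass/manager.hpp>

using namespace ngraph;

void convert_to_fp16(std::shared_ptr<Function> f)
{
    pass::Manager manager;
    // Rewrites f32 constants to f16 and retypes f32 parameters via the callbacks above.
    manager.register_pass<pass::ConvertFP32ToFP16>();
    manager.run_passes(f);
}
```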


@@ -68,6 +68,5 @@ std::shared_ptr<Node> pattern::any_input()
 std::shared_ptr<Node> pattern::any_input(const pattern::op::ValuePredicate& pred)
 {
-    return std::make_shared<pattern::op::Label>(
-        element::Type_t::dynamic, PartialShape::dynamic(), pred);
+    return std::make_shared<pattern::op::Label>(element::dynamic, PartialShape::dynamic(), pred);
 }
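A predicate passed to `any_input` constrains what the wildcard may match; a matcher sketch (names are illustrative):

```C++
#include <ngraph/ngraph.hpp>
#include <ngraph/pattern/matcher.hpp>

using namespace ngraph;

int main()
{
    // Wildcard that only matches f32-typed producer outputs.
    auto in = pattern::any_input(
        [](const Output<Node>& out) { return out.get_element_type() == element::f32; });
    auto pattern_root = std::make_shared<op::v1::Add>(in, in);

    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2});
    auto add = std::make_shared<op::v1::Add>(a, a);

    pattern::Matcher m(pattern_root, "f32_add");
    NGRAPH_CHECK(m.match(add->output(0)));
    return 0;
}
```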


@@ -62,7 +62,7 @@ runtime::HostTensor::HostTensor(const element::Type& element_type,
 }
 runtime::HostTensor::HostTensor(const std::string& name)
-    : HostTensor(element::Type_t::dynamic, PartialShape::dynamic())
+    : HostTensor(element::dynamic, PartialShape::dynamic())
 {
 }


@@ -85,6 +85,26 @@ static const element_types_map_t& get_type_info_map()
     return s_type_info_map;
 };
+std::vector<const element::Type*> element::Type::get_known_types()
+{
+    std::vector<const element::Type*> rc = {&element::dynamic,
+                                            &element::boolean,
+                                            &element::bf16,
+                                            &element::f16,
+                                            &element::f32,
+                                            &element::f64,
+                                            &element::i8,
+                                            &element::i16,
+                                            &element::i32,
+                                            &element::i64,
+                                            &element::u1,
+                                            &element::u8,
+                                            &element::u16,
+                                            &element::u32,
+                                            &element::u64};
+    return rc;
+}
 element::Type::Type(size_t bitwidth,
                     bool is_real,
                     bool is_signed,
@@ -245,7 +265,7 @@ bool element::Type::is_real() const
 bool element::Type::is_integral_number() const
 {
-    return is_integral() && (m_type != element::Type_t::boolean);
+    return is_integral() && (m_type != element::boolean);
 }
 bool element::Type::is_signed() const
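The revert also brings back the `get_known_types` helper next to the restored globals; a consumption sketch (stream output for `element::Type` is assumed, as used elsewhere in the codebase):

```C++
#include <iostream>
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    // Walk the restored table of global element types.
    for (const element::Type* t : element::Type::get_known_types())
    {
        std::cout << *t << " (" << t->bitwidth() << " bits)\n";
    }
    return 0;
}
```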


@@ -481,7 +481,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
     vector<float> float_vec;
     element::Type element_type = tv->get_element_type();
-    if (element_type == element::Type_t::boolean)
+    if (element_type == element::boolean)
     {
         vector<char> vec = read_vector<char>(tv);
         // Changed from vector ctor to explicit for loop to add static_cast
@@ -491,12 +491,12 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::bf16)
+    else if (element_type == element::bf16)
     {
         vector<bfloat16> vec = read_vector<bfloat16>(tv);
         float_vec = bfloat16::to_float_vector(vec);
     }
-    else if (element_type == element::Type_t::f16)
+    else if (element_type == element::f16)
     {
         vector<float16> vec = read_vector<float16>(tv);
         for (float16 value : vec)
@@ -504,7 +504,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::f32)
+    else if (element_type == element::f32)
     {
         vector<float> vec = read_vector<float>(tv);
         for (float value : vec)
@@ -512,7 +512,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::f64)
+    else if (element_type == element::f64)
     {
         vector<double> vec = read_vector<double>(tv);
         for (double value : vec)
@@ -520,7 +520,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::i8)
+    else if (element_type == element::i8)
     {
         vector<int8_t> vec = read_vector<int8_t>(tv);
         for (int8_t value : vec)
@@ -528,7 +528,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::i16)
+    else if (element_type == element::i16)
     {
         vector<int16_t> vec = read_vector<int16_t>(tv);
         for (int16_t value : vec)
@@ -536,7 +536,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::i32)
+    else if (element_type == element::i32)
    {
         vector<int32_t> vec = read_vector<int32_t>(tv);
         for (int32_t value : vec)
@@ -544,7 +544,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::i64)
+    else if (element_type == element::i64)
     {
         vector<int64_t> vec = read_vector<int64_t>(tv);
         for (int64_t value : vec)
@@ -552,7 +552,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::u8)
+    else if (element_type == element::u8)
     {
         vector<uint8_t> vec = read_vector<uint8_t>(tv);
         for (uint8_t value : vec)
@@ -560,7 +560,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::u16)
+    else if (element_type == element::u16)
     {
         vector<uint16_t> vec = read_vector<uint16_t>(tv);
         for (uint16_t value : vec)
@@ -568,7 +568,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::u32)
+    else if (element_type == element::u32)
     {
         vector<uint32_t> vec = read_vector<uint32_t>(tv);
         for (uint32_t value : vec)
@@ -576,7 +576,7 @@ vector<float> read_float_vector(shared_ptr<runtime::Tensor> tv)
             float_vec.push_back(static_cast<float>(value));
         }
     }
-    else if (element_type == element::Type_t::u64)
+    else if (element_type == element::u64)
     {
         vector<uint64_t> vec = read_vector<uint64_t>(tv);
         for (uint64_t value : vec)
@@ -597,7 +597,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
     vector<int64_t> index_vec;
     element::Type element_type = tv->get_element_type();
-    if (element_type == element::Type_t::boolean)
+    if (element_type == element::boolean)
     {
         vector<char> vec = read_vector<char>(tv);
         // Changed from vector ctor to explicit for loop to add static_cast
@@ -607,7 +607,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::bf16)
+    else if (element_type == element::bf16)
     {
         vector<bfloat16> vec = read_vector<bfloat16>(tv);
         vector<float> float_vec = bfloat16::to_float_vector(vec);
@@ -616,7 +616,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::f16)
+    else if (element_type == element::f16)
     {
         vector<float16> vec = read_vector<float16>(tv);
         for (float16 value : vec)
@@ -624,7 +624,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(static_cast<float>(value)));
         }
     }
-    else if (element_type == element::Type_t::f32)
+    else if (element_type == element::f32)
     {
         vector<float> vec = read_vector<float>(tv);
         for (float value : vec)
@@ -632,7 +632,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::f64)
+    else if (element_type == element::f64)
     {
         vector<double> vec = read_vector<double>(tv);
         for (double value : vec)
@@ -640,7 +640,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::i8)
+    else if (element_type == element::i8)
     {
         vector<int8_t> vec = read_vector<int8_t>(tv);
         for (int8_t value : vec)
@@ -648,7 +648,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::i16)
+    else if (element_type == element::i16)
    {
         vector<int16_t> vec = read_vector<int16_t>(tv);
         for (int16_t value : vec)
@@ -656,7 +656,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::i32)
+    else if (element_type == element::i32)
     {
         vector<int32_t> vec = read_vector<int32_t>(tv);
         for (int32_t value : vec)
@@ -664,11 +664,11 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::i64)
+    else if (element_type == element::i64)
     {
         index_vec = read_vector<int64_t>(tv);
     }
-    else if (element_type == element::Type_t::u8)
+    else if (element_type == element::u8)
     {
         vector<uint8_t> vec = read_vector<uint8_t>(tv);
         for (uint8_t value : vec)
@@ -676,7 +676,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::u16)
+    else if (element_type == element::u16)
     {
         vector<uint16_t> vec = read_vector<uint16_t>(tv);
         for (uint16_t value : vec)
@@ -684,7 +684,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
            index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::u32)
+    else if (element_type == element::u32)
     {
         vector<uint32_t> vec = read_vector<uint32_t>(tv);
         for (uint32_t value : vec)
@@ -692,7 +692,7 @@ vector<int64_t> read_index_vector(shared_ptr<runtime::Tensor> tv)
             index_vec.push_back(static_cast<int64_t>(value));
         }
     }
-    else if (element_type == element::Type_t::u64)
+    else if (element_type == element::u64)
    {
         vector<uint64_t> vec = read_vector<uint64_t>(tv);
         for (uint64_t value : vec)


@@ -531,7 +531,7 @@ namespace ngraph
                return static_cast<Type>(m_tensor_proto->data_type());
            }
-           element::Type get_ng_type() const
+           const element::Type& get_ng_type() const
            {
                if (!m_tensor_proto->has_data_type())
                {
@@ -540,29 +540,29 @@ namespace ngraph
                switch (m_tensor_proto->data_type())
                {
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL:
-                   return element::Type_t::boolean;
+                   return element::boolean;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT:
-                   return element::Type_t::f32;
+                   return element::f32;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16:
-                   return element::Type_t::f16;
+                   return element::f16;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE:
-                   return element::Type_t::f64;
+                   return element::f64;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8:
-                   return element::Type_t::i8;
+                   return element::i8;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16:
-                   return element::Type_t::i16;
+                   return element::i16;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32:
-                   return element::Type_t::i32;
+                   return element::i32;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64:
-                   return element::Type_t::i64;
+                   return element::i64;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8:
-                   return element::Type_t::u8;
+                   return element::u8;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16:
-                   return element::Type_t::u16;
+                   return element::u16;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32:
-                   return element::Type_t::u32;
+                   return element::u32;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64:
-                   return element::Type_t::u64;
+                   return element::u64;
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED:
                    throw error::tensor::data_type_undefined{};
                default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()};
@@ -575,29 +575,29 @@ namespace ngraph
                switch (m_tensor_proto->data_type())
                {
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL:
-                   return make_ng_constant<char>(element::Type_t::boolean);
+                   return make_ng_constant<char>(element::boolean);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT:
-                   return make_ng_constant<float>(element::Type_t::f32);
+                   return make_ng_constant<float>(element::f32);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16:
-                   return make_ng_constant<ngraph::float16>(element::Type_t::f16);
+                   return make_ng_constant<ngraph::float16>(element::f16);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE:
-                   return make_ng_constant<double>(element::Type_t::f64);
+                   return make_ng_constant<double>(element::f64);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8:
-                   return make_ng_constant<int8_t>(element::Type_t::i8);
+                   return make_ng_constant<int8_t>(element::i8);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16:
-                   return make_ng_constant<int16_t>(element::Type_t::i16);
+                   return make_ng_constant<int16_t>(element::i16);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32:
-                   return make_ng_constant<int32_t>(element::Type_t::i32);
+                   return make_ng_constant<int32_t>(element::i32);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64:
-                   return make_ng_constant<int64_t>(element::Type_t::i64);
+                   return make_ng_constant<int64_t>(element::i64);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8:
-                   return make_ng_constant<uint8_t>(element::Type_t::u8);
+                   return make_ng_constant<uint8_t>(element::u8);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16:
-                   return make_ng_constant<uint16_t>(element::Type_t::u16);
+                   return make_ng_constant<uint16_t>(element::u16);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32:
-                   return make_ng_constant<uint32_t>(element::Type_t::u32);
+                   return make_ng_constant<uint32_t>(element::u32);
                case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64:
-                   return make_ng_constant<uint64_t>(element::Type_t::u64);
+                   return make_ng_constant<uint64_t>(element::u64);
                default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()};
                }
            }


@@ -75,7 +75,7 @@ namespace ngraph
            const std::string& get_name() const { return m_value_info_proto->name(); }
            const PartialShape& get_shape() const { return m_partial_shape; }
-           element::Type get_element_type() const
+           const element::Type& get_element_type() const
            {
                if (!m_value_info_proto->type().tensor_type().has_elem_type())
                {


@@ -43,8 +43,7 @@ namespace ngraph
                return {std::make_shared<default_opset::Gather>(
                    data,
                    indices,
-                   default_opset::Constant::create(
-                       element::Type_t::i64, Shape{}, {valid_axis}))};
+                   default_opset::Constant::create(element::i64, Shape{}, {valid_axis}))};
            }
        } // namespace set_1


@@ -33,10 +33,10 @@ namespace ngraph
 inline OutputVector identity(const Node& node)
 {
     auto input = node.get_ng_inputs().at(0);
-    if (input.get_element_type() == ngraph::element::Type_t::boolean)
+    if (input.get_element_type() == ngraph::element::boolean)
     {
-        const auto logic_zero = default_opset::Constant::create(
-            ngraph::element::Type_t::boolean, {}, {false});
+        const auto logic_zero =
+            default_opset::Constant::create(ngraph::element::boolean, {}, {false});
         return {std::make_shared<default_opset::LogicalOr>(input, logic_zero)};
     }
     const auto zero =
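
Note: the `identity` importer above forwards boolean tensors by OR-ing with a constant `false`, since arithmetic identity tricks do not apply to booleans. A sketch of the pattern (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// OR with `false` leaves every boolean element unchanged but still yields a
// distinct output node, which is what an Identity op needs.
Output<Node> boolean_identity(const Output<Node>& input)
{
    auto logic_zero = opset5::Constant::create(element::boolean, Shape{}, {false});
    return std::make_shared<opset5::LogicalOr>(input, logic_zero);
}
```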


@@ -37,7 +37,7 @@ namespace ngraph
 {
     namespace common
     {
-        const ngraph::element::Type get_ngraph_element_type(std::int64_t onnx_type);
+        const ngraph::element::Type& get_ngraph_element_type(std::int64_t onnx_type);
         /// \brief Return a monotonic sequence.
         ///


@@ -62,84 +62,84 @@ namespace ngraph
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::float16>(const Tensor& tensor)
 {
-    return __make_ng_constant<ngraph::float16>(element::Type_t::f16, tensor);
+    return __make_ng_constant<ngraph::float16>(element::f16, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::float32>(const Tensor& tensor)
 {
-    return __make_ng_constant<float>(element::Type_t::f32, tensor);
+    return __make_ng_constant<float>(element::f32, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::float64>(const Tensor& tensor)
 {
-    return __make_ng_constant<double>(element::Type_t::f64, tensor);
+    return __make_ng_constant<double>(element::f64, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::int8>(const Tensor& tensor)
 {
-    return __make_ng_constant<int8_t>(element::Type_t::i8, tensor);
+    return __make_ng_constant<int8_t>(element::i8, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::int16>(const Tensor& tensor)
 {
-    return __make_ng_constant<int16_t>(element::Type_t::i16, tensor);
+    return __make_ng_constant<int16_t>(element::i16, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::int32>(const Tensor& tensor)
 {
-    return __make_ng_constant<int32_t>(element::Type_t::i32, tensor);
+    return __make_ng_constant<int32_t>(element::i32, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::int64>(const Tensor& tensor)
 {
-    return __make_ng_constant<int64_t>(element::Type_t::i64, tensor);
+    return __make_ng_constant<int64_t>(element::i64, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::uint8>(const Tensor& tensor)
 {
-    return __make_ng_constant<uint8_t>(element::Type_t::u8, tensor);
+    return __make_ng_constant<uint8_t>(element::u8, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::uint16>(const Tensor& tensor)
 {
-    return __make_ng_constant<uint16_t>(element::Type_t::u16, tensor);
+    return __make_ng_constant<uint16_t>(element::u16, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::uint32>(const Tensor& tensor)
 {
-    return __make_ng_constant<uint32_t>(element::Type_t::u32, tensor);
+    return __make_ng_constant<uint32_t>(element::u32, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::uint64>(const Tensor& tensor)
 {
-    return __make_ng_constant<uint64_t>(element::Type_t::u64, tensor);
+    return __make_ng_constant<uint64_t>(element::u64, tensor);
 }
 template <>
 inline std::shared_ptr<default_opset::Constant>
     make_ng_constant<Tensor::Type::boolean>(const Tensor& tensor)
 {
-    return __make_ng_constant<char>(element::Type_t::boolean, tensor);
+    return __make_ng_constant<char>(element::boolean, tensor);
 }
 inline std::shared_ptr<default_opset::Constant>
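
Note: the specializations above form a lookup table from the importer's tensor type tags to `(storage type, element type)` pairs. A self-contained sketch of the same pattern with hypothetical names (`MyTag` and `make_constant_for` are illustrations, not the importer's API):

```C++
#include <memory>
#include <vector>
#include <ngraph/ngraph.hpp>

using namespace ngraph;

enum class MyTag { f32, i64 };

// Primary template left undefined: only the explicit specializations below
// may be instantiated, mirroring the make_ng_constant table above.
template <MyTag tag>
std::shared_ptr<op::Constant> make_constant_for(const std::vector<double>& values);

template <>
std::shared_ptr<op::Constant> make_constant_for<MyTag::f32>(const std::vector<double>& values)
{
    // Constant::create converts the incoming values to f32 storage.
    return op::Constant::create(element::f32, Shape{values.size()}, values);
}

template <>
std::shared_ptr<op::Constant> make_constant_for<MyTag::i64>(const std::vector<double>& values)
{
    return op::Constant::create(element::i64, Shape{values.size()}, values);
}
```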


@@ -39,8 +39,7 @@ namespace ngraph
 }
 else
 {
-    constant_value =
-        default_opset::Constant::create(element::Type_t::f32, {}, {0});
+    constant_value = default_opset::Constant::create(element::f32, {}, {0});
 }
 return {std::make_shared<default_opset::Broadcast>(constant_value,
                                                    node.get_ng_inputs().at(0))};
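
Note: the fallback above materializes the fill value as a scalar constant and lets `Broadcast` expand it to the requested shape. A sketch (opset5 standing in for `default_opset`; `target_shape` is a 1-D shape tensor):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Broadcast a scalar f32 zero over a dynamically supplied target shape.
std::shared_ptr<Node> fill_with_zero(const Output<Node>& target_shape)
{
    auto zero = opset5::Constant::create(element::f32, Shape{}, {0});
    return std::make_shared<opset5::Broadcast>(zero, target_shape);
}
```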


@@ -63,11 +63,10 @@ namespace ngraph
     padding_above);
 const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1);
-auto scale_one = make_constant(ngraph::element::Type_t::f32, Shape{}, 1);
+auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1);
 auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0);
 auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0);
-auto output_zero_point =
-    make_constant(ngraph::element::Type_t::i32, Shape{}, 0);
+auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0);
 if (num_inputs == 2)
 {
@@ -85,7 +84,7 @@ namespace ngraph
     filters_zero_point,
     scale_one,
     output_zero_point,
-    ngraph::element::Type_t::i32,
+    ngraph::element::i32,
     ngraph::AxisSet{},
     ngraph::AxisSet{},
     ngraph::AxisSet{})};
@@ -111,7 +110,7 @@ namespace ngraph
     filters_zero_point,
     scale_one,
     output_zero_point,
-    ngraph::element::Type_t::i32,
+    ngraph::element::i32,
     ngraph::AxisSet{},
     ngraph::AxisSet{},
     ngraph::AxisSet{})};


@@ -74,7 +74,7 @@ namespace ngraph
     data,
     filters,
     default_opset::Constant::create(
-        element::Type_t::i64, Shape{output_shape.size()}, output_shape),
+        element::i64, Shape{output_shape.size()}, output_shape),
     strides,
     dilations,
     auto_pad_type,
@@ -113,7 +113,7 @@ namespace ngraph
     data,
     filters,
     default_opset::Constant::create(
-        element::Type_t::i64, Shape{output_shape.size()}, output_shape),
+        element::i64, Shape{output_shape.size()}, output_shape),
     strides,
     pads_begin,
     pads_end,
@@ -144,10 +144,10 @@ namespace ngraph
     std::make_shared<default_opset::ShapeOf>(filters);
 const auto filters_rank =
     std::make_shared<default_opset::ShapeOf>(filters_shape);
-const auto one_node = default_opset::Constant::create(
-    element::Type_t::i64, Shape{1}, {1});
-const auto zero_node = default_opset::Constant::create(
-    element::Type_t::i64, Shape{1}, {0});
+const auto one_node =
+    default_opset::Constant::create(element::i64, Shape{1}, {1});
+const auto zero_node =
+    default_opset::Constant::create(element::i64, Shape{1}, {0});
 std::shared_ptr<ngraph::Node> in_c_dim =
     std::make_shared<default_opset::StridedSlice>(
@@ -166,8 +166,8 @@ namespace ngraph
     std::vector<int64_t>{0}); // end mask
 // Apply shape layout transformation:
-const auto groups_node = default_opset::Constant::create(
-    element::Type_t::i64, Shape{1}, {groups});
+const auto groups_node =
+    default_opset::Constant::create(element::i64, Shape{1}, {groups});
 in_c_dim =
     std::make_shared<default_opset::Divide>(in_c_dim, groups_node);
@@ -192,7 +192,7 @@ namespace ngraph
     new_bias_shape[1] = conv_pshape[1].get_length();
     bias_shape_node = default_opset::Constant::create(
-        element::Type_t::i64, Shape{new_bias_shape.size()}, new_bias_shape);
+        element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
 }
 else
 {
@@ -201,10 +201,10 @@ namespace ngraph
     std::make_shared<default_opset::ShapeOf>(conv_shape);
 // Prepare new bias shape base: [1, 1, 1, 1, ... ]
-const auto one_node = default_opset::Constant::create(
-    element::Type_t::i64, Shape{1}, {1});
-const auto two_node = default_opset::Constant::create(
-    element::Type_t::i64, Shape{1}, {2});
+const auto one_node =
+    default_opset::Constant::create(element::i64, Shape{1}, {1});
+const auto two_node =
+    default_opset::Constant::create(element::i64, Shape{1}, {2});
 const auto remaining_shape_length =
     std::make_shared<default_opset::Subtract>(conv_rank, two_node);
 const auto remaining_bias_shape_ones =
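
Note: in both of the first hunks, the requested spatial output shape is wrapped in an i64 constant before being handed to the transposed convolution. A sketch under the same assumptions (opset5 standing in for `default_opset`; parameters are illustrative):

```C++
#include <memory>
#include <vector>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Build a ConvolutionBackpropData with an explicit spatial output shape.
std::shared_ptr<Node> deconv_with_output_shape(const Output<Node>& data,
                                               const Output<Node>& filters,
                                               const std::vector<int64_t>& output_shape,
                                               const Strides& strides,
                                               const Strides& dilations)
{
    auto shape_node =
        opset5::Constant::create(element::i64, Shape{output_shape.size()}, output_shape);
    // Pads are left empty; with SAME_UPPER the op derives them from the
    // requested output shape (assumption, mirroring the importer's usage).
    return std::make_shared<opset5::ConvolutionBackpropData>(data,
                                                             filters,
                                                             shape_node,
                                                             strides,
                                                             CoordinateDiff{},
                                                             CoordinateDiff{},
                                                             dilations,
                                                             op::PadType::SAME_UPPER);
}
```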


@@ -41,8 +41,8 @@ namespace ngraph
 }
 else
 {
-    axis = default_opset::Constant::create(
-        element::Type_t::i64, Shape{}, {0}); // default
+    axis =
+        default_opset::Constant::create(element::i64, Shape{}, {0}); // default
 }
 return OutputVector{
     std::make_shared<default_opset::CumSum>(data, axis, exclusive, reverse)};
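
Note: when the ONNX `axis` input is absent, the importer substitutes a scalar i64 zero, as the hunk shows. Sketch (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// CumSum along axis 0 by default, matching the fallback above.
std::shared_ptr<Node> cumsum_default_axis(const Output<Node>& data,
                                          bool exclusive,
                                          bool reverse)
{
    auto axis = opset5::Constant::create(element::i64, Shape{}, {0});
    return std::make_shared<opset5::CumSum>(data, axis, exclusive, reverse);
}
```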


@@ -41,17 +41,17 @@ namespace ngraph
 {
     auto zero_point = inputs[2];
-    if (zero_point.get_element_type() != element::Type_t::f32)
+    if (zero_point.get_element_type() != element::f32)
     {
-        zero_point = std::make_shared<default_opset::Convert>(
-            zero_point, element::Type_t::f32);
+        zero_point =
+            std::make_shared<default_opset::Convert>(zero_point, element::f32);
     }
     return zero_point;
 }
 else
 {
-    return default_opset::Constant::create(element::Type_t::f32, Shape{}, {0});
+    return default_opset::Constant::create(element::f32, Shape{}, {0});
 }
 }
 }
@@ -70,13 +70,12 @@ namespace ngraph
 const auto scale = inputs[1];
 const auto zero_point = get_zero_point(inputs);
-common::validate_scalar_input("Dequantization scale",
-                              scale.get_node_shared_ptr(),
-                              {element::Type_t::f32});
+common::validate_scalar_input(
+    "Dequantization scale", scale.get_node_shared_ptr(), {element::f32});
 common::validate_scalar_input("Zero point", zero_point.get_node_shared_ptr());
 const auto converted_x =
-    std::make_shared<default_opset::Convert>(x, element::Type_t::f32);
+    std::make_shared<default_opset::Convert>(x, element::f32);
 return {std::make_shared<default_opset::Multiply>(
     std::make_shared<default_opset::Subtract>(converted_x, zero_point), scale)};
@@ -164,7 +163,7 @@ namespace ngraph
 }
 const auto target_shape = default_opset::Constant::create(
-    element::Type_t::i64, Shape{target_dims.size()}, target_dims);
+    element::i64, Shape{target_dims.size()}, target_dims);
 return std::make_shared<default_opset::Reshape>(input, target_shape, true);
 }
@@ -199,7 +198,7 @@ namespace ngraph
 zero_point = reshape_input(zero_point, axis, x_shape);
 const auto converted_x =
-    std::make_shared<default_opset::Convert>(x, element::Type_t::f32);
+    std::make_shared<default_opset::Convert>(x, element::f32);
 return {std::make_shared<default_opset::Multiply>(
     std::make_shared<default_opset::Subtract>(converted_x, zero_point), scale)};
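
Note: both hunks implement the standard ONNX dequantization formula `y = (float(x) - zero_point) * scale`. Condensed into a sketch (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// y = (convert<f32>(x) - zero_point) * scale
Output<Node> dequantize(const Output<Node>& x,
                        const Output<Node>& scale,
                        const Output<Node>& zero_point)
{
    auto x_f32 = std::make_shared<opset5::Convert>(x, element::f32);
    auto shifted = std::make_shared<opset5::Subtract>(x_f32, zero_point);
    return std::make_shared<opset5::Multiply>(shifted, scale);
}
```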


@@ -57,7 +57,7 @@ namespace ngraph
 auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
 std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
 auto reduce_axes = default_opset::Constant::create(
-    element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector);
+    element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
 return {std::make_shared<default_opset::ReduceMean>(data, reduce_axes, true)};
 }
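
Note: the reduction axes start at 2, so batch (axis 0) and channels (axis 1) survive while every spatial dimension is averaged away; the GlobalMaxPool hunk below is identical except for `ReduceMax`. Sketch (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <numeric>
#include <vector>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Average over all spatial axes [2, 2 + spatial_rank), keeping dims.
std::shared_ptr<Node> global_average_pool(const Output<Node>& data,
                                          size_t data_spatial_rank)
{
    std::vector<int64_t> axes(data_spatial_rank);
    std::iota(axes.begin(), axes.end(), 2);
    auto reduce_axes =
        opset5::Constant::create(element::i64, Shape{data_spatial_rank}, axes);
    return std::make_shared<opset5::ReduceMean>(data, reduce_axes, true);
}
```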


@@ -57,7 +57,7 @@ namespace ngraph
 auto reduce_axes_vector = std::vector<std::int64_t>(data_spatial_rank);
 std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2);
 auto reduce_axes = default_opset::Constant::create(
-    element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector);
+    element::i64, Shape{data_spatial_rank}, reduce_axes_vector);
 return {std::make_shared<default_opset::ReduceMax>(data, reduce_axes, true)};
 }


@@ -50,22 +50,22 @@ namespace ngraph
     std::make_shared<default_opset::ShapeOf>(coerced_tensor);
 Output<ngraph::Node> row_size = std::make_shared<default_opset::Gather>(
     coerced_tensor_shape,
-    default_opset::Constant::create(element::Type_t::i64, {1}, {1}),
-    default_opset::Constant::create(element::Type_t::i64, {}, {0}));
+    default_opset::Constant::create(element::i64, {1}, {1}),
+    default_opset::Constant::create(element::i64, {}, {0}));
 row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size);
 const auto indices_axis = 1;
 const auto topk = std::make_shared<default_opset::TopK>(
     coerced_tensor,
-    default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}),
+    default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}),
     indices_axis,
     default_opset::TopK::Mode::MAX,
     default_opset::TopK::SortType::NONE);
 const auto on_value =
-    default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1});
+    default_opset::Constant::create(ngraph::element::i64, Shape{}, {1});
 const auto off_value =
-    default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {0});
+    default_opset::Constant::create(ngraph::element::i64, Shape{}, {0});
 const auto results = std::make_shared<default_opset::OneHot>(
     topk->output(1), row_size, on_value, off_value, indices_axis);
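
Note: hardmax here is top-1 along the trailing axis of the flattened (2-D) tensor followed by a one-hot expansion. A sketch under the same assumptions (opset5 standing in for `default_opset`; `row_size` is the scalar per-row length computed above):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

std::shared_ptr<Node> hardmax_2d(const Output<Node>& coerced_tensor,
                                 const Output<Node>& row_size)
{
    const int64_t indices_axis = 1;
    auto k = opset5::Constant::create(element::i64, Shape{}, {1});
    // Top-1 along each row; output(1) holds the argmax indices.
    auto topk = std::make_shared<opset5::TopK>(coerced_tensor,
                                               k,
                                               indices_axis,
                                               opset5::TopK::Mode::MAX,
                                               opset5::TopK::SortType::NONE);
    auto on_value = opset5::Constant::create(element::i64, Shape{}, {1});
    auto off_value = opset5::Constant::create(element::i64, Shape{}, {0});
    return std::make_shared<opset5::OneHot>(
        topk->output(1), row_size, on_value, off_value, indices_axis);
}
```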


@@ -99,7 +99,7 @@ namespace ngraph
 if (data_pshape.is_static())
 {
     data_shape_node = std::make_shared<default_opset::Constant>(
-        element::Type_t::i64,
+        element::i64,
         Shape{static_cast<size_t>(data_pshape.rank().get_length())},
         data_pshape.to_shape());
 }
@@ -112,13 +112,11 @@ namespace ngraph
 scale = std::make_shared<default_opset::Broadcast>(
     scale,
     data_shape_node,
-    std::make_shared<default_opset::Constant>(
-        element::Type_t::i64, Shape{1}, 1));
+    std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1));
 bias = std::make_shared<default_opset::Broadcast>(
     bias,
     data_shape_node,
-    std::make_shared<default_opset::Constant>(
-        element::Type_t::i64, Shape{1}, 1));
+    std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1));
 // scale * mvn + bias
 std::shared_ptr<ngraph::Node> result =
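
Note: after the broadcasts, the hunk's trailing comment spells out the epilogue: `scale * mvn + bias`. As elementwise ops (sketch, opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// result = scale * MVN(data) + bias, with scale and bias already broadcast
// to the data shape.
std::shared_ptr<Node> affine(const Output<Node>& mvn,
                             const Output<Node>& scale,
                             const Output<Node>& bias)
{
    auto scaled = std::make_shared<opset5::Multiply>(mvn, scale);
    return std::make_shared<opset5::Add>(scaled, bias);
}
```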


@@ -32,8 +32,7 @@ namespace ngraph
 {
     const auto coerced_data = ngraph::builder::opset1::flatten(data, axis);
-    const auto axis_1 =
-        default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1});
+    const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1});
     const auto max =
         std::make_shared<default_opset::ReduceMax>(coerced_data, axis_1, true);
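
Note: computing the per-row maximum with `keep_dims = true` is the usual log-softmax stabilization step; the subtraction that consumes it falls outside the visible hunk, so the sketch below only reproduces what is shown (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Row maximum over axis 1 of the flattened data, dims kept so the result
// broadcasts against the original tensor.
std::shared_ptr<Node> row_max(const Output<Node>& coerced_data)
{
    auto axis_1 = opset5::Constant::create(element::i64, Shape{1}, {1});
    return std::make_shared<opset5::ReduceMax>(coerced_data, axis_1, true);
}
```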


@@ -62,7 +62,7 @@ namespace ngraph
     ->input_value(1)
     .get_node_shared_ptr();
 if (ngraph::op::is_constant(second_input) &&
-    second_input->get_element_type() == element::Type_t::boolean &&
+    second_input->get_element_type() == element::boolean &&
     as_type_ptr<default_opset::Constant>(second_input)
         ->cast_vector<bool>()
         .at(0) == false)
@@ -90,8 +90,7 @@ namespace ngraph
 if (ngraph::op::is_null(ng_inputs.at(0))) // trip count skipped
 {
     // -1 means infinite Loop
-    trip_count =
-        ngraph::op::Constant::create(ngraph::element::Type_t::i64, {1}, {-1});
+    trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1});
 }
 else
 {
@@ -103,8 +102,8 @@ namespace ngraph
 if (ngraph::op::is_null(
         ng_inputs.at(1).get_node_shared_ptr())) // termination condition skipped
 {
-    termination_cond = ngraph::op::Constant::create(
-        ngraph::element::Type_t::boolean, {1}, {true});
+    termination_cond =
+        ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
 }
 else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
          as_type_ptr<default_opset::Constant>(
@@ -131,8 +130,8 @@ namespace ngraph
 }
 const int64_t concat_axis = 0;
-const auto concat_axis_const = ngraph::op::Constant::create(
-    ngraph::element::Type_t::i64, {1}, {concat_axis});
+const auto concat_axis_const =
+    ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis});
 // provide scalar handing for scan outputs
 for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size();
      ++i)
@@ -150,8 +149,8 @@ namespace ngraph
 // optimization allow to improve nG Loop shape inference
 if (is_termination_condition_always_true(body_loop_out_cond))
 {
-    body_outputs[0] = ngraph::op::Constant::create(
-        ngraph::element::Type_t::boolean, {1}, {true});
+    body_outputs[0] =
+        ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
 }
 CHECK_VALID_NODE(node,
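
Note: the Loop hunks encode the ONNX defaults as constants: a trip count of `-1` means the loop is bounded only by its condition (effectively infinite), and an omitted termination condition becomes a constant `true`. Sketch:

```C++
#include <memory>
#include <ngraph/ngraph.hpp>

using namespace ngraph;

// ONNX Loop default inputs, as built in the hunks above.
std::shared_ptr<Node> default_trip_count()
{
    return op::Constant::create(element::i64, Shape{1}, {-1}); // "infinite"
}

std::shared_ptr<Node> default_termination_cond()
{
    return op::Constant::create(element::boolean, Shape{1}, {true});
}
```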


@@ -58,14 +58,12 @@ namespace ngraph
     "Only normalization of 1st or 2nd order is supported.");
 const auto normalize_axis_const =
-    default_opset::Constant::create(element::Type_t::i64, {}, {normalize_axis});
+    default_opset::Constant::create(element::i64, {}, {normalize_axis});
 std::shared_ptr<ngraph::Node> norm = ngraph::builder::opset1::lp_norm(
     data, normalize_axis_const, static_cast<std::size_t>(p_norm));
-const auto target_shape =
-    default_opset::Constant::create(element::Type_t::i64,
-                                    Shape{size_t(data_rank_value)},
-                                    data_shape.to_shape());
+const auto target_shape = default_opset::Constant::create(
+    element::i64, Shape{size_t(data_rank_value)}, data_shape.to_shape());
 // Create a default axes order matching the data tensor rank and erase the
 // element at the 'normalize_axis' position. The erased element indicates the
@@ -76,7 +74,7 @@ namespace ngraph
 axes_values.erase(axes_values.begin() + normalize_axis);
 const auto axes_mapping = default_opset::Constant::create(
-    element::Type_t::i64, Shape{axes_values.size()}, axes_values);
+    element::i64, Shape{axes_values.size()}, axes_values);
 norm = std::make_shared<default_opset::Broadcast>(
     norm, target_shape, axes_mapping);
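
Note: the axes mapping described in the in-code comment is a full iota over the data rank with the normalized axis erased, so the broadcast reinstates exactly that one dimension. Sketch (assuming the iota starts at 0, which the visible hunk does not show):

```C++
#include <memory>
#include <numeric>
#include <vector>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

std::shared_ptr<opset5::Constant> axes_mapping_without(size_t data_rank,
                                                       int64_t normalize_axis)
{
    std::vector<int64_t> axes_values(data_rank);
    std::iota(axes_values.begin(), axes_values.end(), 0);
    // Dropping the normalized axis tells Broadcast which dimension to add.
    axes_values.erase(axes_values.begin() + normalize_axis);
    return opset5::Constant::create(
        element::i64, Shape{axes_values.size()}, axes_values);
}
```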


@@ -75,7 +75,7 @@ namespace ngraph
 output_shape.at(0) = data_shape[0].get_length();
 const auto reshape_pattern = default_opset::Constant::create(
-    element::Type_t::i64, Shape{output_shape.size()}, output_shape);
+    element::i64, Shape{output_shape.size()}, output_shape);
 slice =
     std::make_shared<default_opset::Reshape>(slice, reshape_pattern, false);


@@ -211,7 +211,7 @@ namespace ngraph
 m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] =
     default_opset::Constant::create(
-        element::Type_t::i32,
+        element::i32,
         Shape{m_dim_map[LSTMInputDimension::BATCH_SIZE]},
         std::vector<std::int32_t>(
             m_dim_map[LSTMInputDimension::BATCH_SIZE],


@@ -49,7 +49,7 @@ namespace ngraph
 else
 {
     max_output_boxes_per_class =
-        default_opset::Constant::create(element::Type_t::i64, Shape{}, {0});
+        default_opset::Constant::create(element::i64, Shape{}, {0});
 }
 Output<ngraph::Node> iou_threshold;
@@ -61,7 +61,7 @@ namespace ngraph
 else
 {
     iou_threshold =
-        default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+        default_opset::Constant::create(element::f32, Shape{}, {.0f});
 }
 Output<ngraph::Node> score_threshold;
@@ -73,7 +73,7 @@ namespace ngraph
 else
 {
     score_threshold =
-        default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f});
+        default_opset::Constant::create(element::f32, Shape{}, {.0f});
 }
 const auto center_point_box =
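
Note: each optional NonMaxSuppression input falls back to a zero-valued scalar constant when absent, as the three hunks show. Collected into a sketch (opset5 standing in for `default_opset`):

```C++
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Defaults for omitted NonMaxSuppression inputs.
inline Output<Node> default_max_output_boxes_per_class()
{
    return opset5::Constant::create(element::i64, Shape{}, {0});
}

inline Output<Node> default_iou_or_score_threshold()
{
    return opset5::Constant::create(element::f32, Shape{}, {.0f});
}
```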


@@ -30,7 +30,7 @@ namespace ngraph
 OutputVector non_zero(const Node& node)
 {
     const auto data = node.get_ng_inputs().at(0);
-    return {std::make_shared<default_opset::NonZero>(data, element::Type_t::i64)};
+    return {std::make_shared<default_opset::NonZero>(data, element::i64)};
 }
 } // namespace set_1
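
Note: `NonZero` is constructed with an explicit i64 index element type, matching ONNX's int64 output. Sketch (opset5 standing in for `default_opset`):

```C++
#include <memory>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Indices of non-zero elements, emitted as i64 as ONNX requires.
std::shared_ptr<Node> non_zero_i64(const Output<Node>& data)
{
    return std::make_shared<opset5::NonZero>(data, element::i64);
}
```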


@@ -32,14 +32,13 @@ namespace ngraph
 OutputVector onehot(const Node& node)
 {
     OutputVector inputs{node.get_ng_inputs()};
-    auto indices = std::make_shared<default_opset::Convert>(inputs.at(0),
-                                                            element::Type_t::i64);
+    auto indices =
+        std::make_shared<default_opset::Convert>(inputs.at(0), element::i64);
     auto depth = reshape::interpret_as_scalar(inputs.at(1));
     // Rank 1 tensor containing exactly two elements: [off_value, on_value]
     auto values = inputs.at(2);
-    auto split_axis =
-        default_opset::Constant::create(element::Type_t::i64, {}, {0});
+    auto split_axis = default_opset::Constant::create(element::i64, {}, {0});
     auto off_on_values =
         std::make_shared<default_opset::Split>(values, split_axis, 2);
     auto off_value = reshape::interpret_as_scalar(off_on_values->output(0));
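
Note: the two-element `values` input is split along axis 0 into the scalar off/on values that feed `OneHot`. Sketch (opset5 standing in for `default_opset`; the scalar-reshape step shown above is omitted here):

```C++
#include <memory>
#include <utility>
#include <ngraph/opsets/opset5.hpp>

using namespace ngraph;

// Split [off_value, on_value] into its two halves.
std::pair<Output<Node>, Output<Node>> split_off_on(const Output<Node>& values)
{
    auto split_axis = opset5::Constant::create(element::i64, Shape{}, {0});
    auto off_on = std::make_shared<opset5::Split>(values, split_axis, 2);
    return {off_on->output(0), off_on->output(1)};
}
```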

Some files were not shown because too many files have changed in this diff.